From 19164731eb1f0d434bf79ad7afa3b787e6f30cdb Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 29 Apr 2026 21:50:21 +0000 Subject: [PATCH 1/5] android: synchronous IPC close on JS context tear-down MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds NodeJSIPC.close() — a synchronous terminal teardown — and calls it from ComapeoCoreModule.OnDestroy in place of the fire-and-forget disconnect(). Why: rpc-reflector subscriptions registered by the JS client during a session are stored on the backend keyed by event name and bound to the underlying socket connection; the cleanup path runs only when the backend observes the socket closing. The previous wiring closed the socket from a launched coroutine, so the new OnCreate (which fires immediately on JS context rebuild — Expo modules are bound to the AppContext, whose lifetime is the JS runtime, not the Activity) could open a fresh connection before the old socket's FD was released. The backend's per-connection cleanup then ran against the wrong (or no) connection, leaving the prior session's listeners attached to MapeoManager and emitting events to a peer that no longer existed. close() closes the LocalSocket on the calling thread and cancels the coroutine scope so receive/send jobs don't linger across reloads. It's purely additive: disconnect() is unchanged and still used by the FGS side (NodeJSService) and by internal IOException recovery. The OnCreate / OnDestroy block now carries a comment documenting that these hooks bind to the JS runtime lifetime (verified via Expo's android-lifecycle-listeners docs), distinguishing them from the activity-level OnActivityEntersForeground hook which is kept for FGS-respawn-while-backgrounded recovery. Co-Authored-By: Claude Opus 4.7 --- .../com/comapeo/core/ComapeoCoreModule.kt | 24 +++++++++++-- .../main/java/com/comapeo/core/NodeJSIPC.kt | 35 +++++++++++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) diff --git a/android/src/main/java/com/comapeo/core/ComapeoCoreModule.kt b/android/src/main/java/com/comapeo/core/ComapeoCoreModule.kt index 8692a31..6496ae9 100644 --- a/android/src/main/java/com/comapeo/core/ComapeoCoreModule.kt +++ b/android/src/main/java/com/comapeo/core/ComapeoCoreModule.kt @@ -100,6 +100,17 @@ class ComapeoCoreModule : Module() { } override fun definition() = ModuleDefinition { + // OnCreate / OnDestroy are bound to the Expo `AppContext`, whose + // lifetime is the React Native JS runtime — not the Android + // Activity. They fire on every JS context tear-down/rebuild + // (dev reload, `DevSettings.reload()`, fast-refresh full + // reload), which is exactly the reconnect boundary we want for + // the RPC sockets: the previous JS session's rpc-reflector + // client is gone, but its event-listener subscriptions are + // still attached on the backend's MapeoManager. Closing the + // socket here forces the backend to observe the disconnect and + // tear those subscriptions down before the next JS session + // opens a fresh connection from a new OnCreate. OnCreate { val socketFile = File(appContext.persistentFilesDirectory, ComapeoCoreService.COMAPEO_SOCKET_FILENAME) @@ -190,8 +201,17 @@ class ComapeoCoreModule : Module() { } OnDestroy { - ipc.disconnect() - controlIpc.disconnect() + // Synchronous close so the AF_UNIX socket FD is released on + // this thread before OnDestroy returns. 
The fire-and-forget + // disconnect() would let the next OnCreate open a new + // connection while the old socket is still alive in a + // launched coroutine; a synchronous close means the backend + // sees EOF on the previous session before it accepts the + // new connection, so rpc-reflector's per-connection cleanup + // path (server.close → removeListener for every prior + // subscription) runs against the right connection. + ipc.close() + controlIpc.close() } OnActivityEntersForeground { diff --git a/android/src/main/java/com/comapeo/core/NodeJSIPC.kt b/android/src/main/java/com/comapeo/core/NodeJSIPC.kt index f49ab27..79828bd 100644 --- a/android/src/main/java/com/comapeo/core/NodeJSIPC.kt +++ b/android/src/main/java/com/comapeo/core/NodeJSIPC.kt @@ -8,6 +8,7 @@ import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.Job import kotlinx.coroutines.SupervisorJob +import kotlinx.coroutines.cancel import kotlinx.coroutines.cancelAndJoin import kotlinx.coroutines.channels.Channel import kotlinx.coroutines.delay @@ -231,6 +232,40 @@ class NodeJSIPC( connect() sendChannel.trySend(message) } + + /** + * Synchronous, terminal teardown for module-destroy lifecycle. + * + * Unlike [disconnect] — which launches a coroutine and returns + * immediately — [close] closes the underlying [LocalSocket] on the + * calling thread before returning, so the peer (the Node.js backend + * over the AF_UNIX socket) observes EOF before [close] returns. + * That ordering is the point: it lets a caller running in a tight + * lifecycle window (e.g. the Expo module's `OnDestroy`, immediately + * before a fresh `OnCreate` opens a new connection) guarantee the + * old socket is gone before the new one is opened, so the backend's + * per-connection state (notably any rpc-reflector event-listener + * subscriptions registered against the long-lived handler) is torn + * down on the same reload that starts a fresh client. + * + * After [close] the instance must not be reused: the scope is + * cancelled and a fresh [NodeJSIPC] should be constructed. + */ + fun close() { + // Cancel the scope first so the receive/send coroutines stop + // touching the streams as we close them. socket.close() will + // also unblock any in-flight read/write with an IOException; + // the cancelled scope means the coroutines exit without + // re-entering disconnect(). + scope.cancel() + sendChannel.close() + try { dataOutputStream?.close() } catch (_: Exception) {} + try { dataInputStream?.close() } catch (_: Exception) {} + if (::socket.isInitialized) { + try { socket.close() } catch (_: Exception) {} + } + state.value = State.Disconnected + } } private suspend fun connectWithRetry( From 352319e283b4c3f4a286228b4ca398deb9d40bbe Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 29 Apr 2026 22:11:08 +0000 Subject: [PATCH 2/5] backend+ios: emit 'close' on socket teardown; observe RN reload on iOS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three coupled fixes for the same underlying issue: rpc-reflector event-listener subscriptions registered against MapeoManager during a JS session must be torn down when that session ends, on every platform. backend: SocketMessagePort.close() now emits 'close' The constructor wires `framedStream.on('close')` to call `SocketMessagePort.close()`, and ComapeoRpcServer's connection handler attaches `messagePort.on('close', () => server.close())` to drive rpc-reflector's per-connection cleanup. 
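For reference, the shape of that wiring (a sketch reconstructed from
this description, not the verbatim ComapeoRpcServer source; the
`socketServer` and `manager` names are stand-ins):

    import { createServer } from "rpc-reflector";
    import { SocketMessagePort } from "./lib/message-port.js";

    socketServer.on("connection", (socket) => {
      const messagePort = new SocketMessagePort(socket);
      messagePort.start();
      const server = createServer(manager, messagePort);
      // Per-connection cleanup: removes every listener this client
      // registered on the handler. Dead code until this commit,
      // because close() never emitted the 'close' it waits for.
      messagePort.on("close", () => server.close());
    });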
close() never emitted the event, so the server.close() listener was dead — every reconnect leaked the previous session's listeners onto MapeoManager. One-line fix; the rest of the wiring was already correct. ios: JSReloadObserver Expo's `OnDestroy` doesn't fire on iOS JS reload (expo/expo#33655), so the lifecycle-only path that works on Android leaves stale sockets attached on every reload here. JSReloadObserver subscribes to `RCTBridgeWillReloadNotification` (classic arch) and `RCTJavaScriptWillStartLoadingNotification` (bridgeless / new arch) and runs a callback synchronously on the posting thread. ComapeoCoreModule installs one in OnCreate that calls `ipc?.disconnect()` — synchronous on iOS, so the backend observes EOF before the notification post returns and the rpc-reflector cleanup runs against the right connection. The notification names are hard-coded as strings so the file builds in the macOS-only swift-test target (React isn't a dependency there). Tests backend/test/message-port.test.js (node --test, 4 tests): - SocketMessagePort emits 'close' once when the underlying socket closes (regression-fails without the emit) - close() is idempotent - end-to-end: rpc-reflector listener attached to a fake handler is removed when the socket closes — the contract that was silently broken before this commit - smoke test for the in-test pair() helper ios/Tests/JSReloadObserverTests.swift (Swift Package, macOS): - fires for every observed name; ignores others - deinit detaches (so leaked module instances on iOS reload don't keep firing into a torn-down ipc reference) - callback runs synchronously on the posting thread - default names cover both classic and bridgeless reload signals - integration test wires JSReloadObserver to a live NodeJSIPC + MockNodeServer and verifies posting the notification disconnects the client and the server reads EOF android NodeJSIPCTest.closeReleasesSocketSynchronously (instrumented): - blocks the mock server on `read()` of the connection, calls `ipc.close()`, and asserts the read unblocks with EOF inside 1 s. Pins the synchronous-close contract and would catch a regression to the fire-and-forget disconnect path that the previous commit removed from OnDestroy. Co-Authored-By: Claude Opus 4.7 --- .../java/com/comapeo/core/NodeJSIPCTest.kt | 70 ++++++ backend/lib/message-port.js | 11 + backend/package.json | 1 + backend/test/message-port.test.js | 173 ++++++++++++++ ios/ComapeoCoreModule.swift | 27 +++ ios/JSReloadObserver.swift | 76 +++++++ ios/Package.swift | 1 + ios/Tests/JSReloadObserverTests.swift | 214 ++++++++++++++++++ 8 files changed, 573 insertions(+) create mode 100644 backend/test/message-port.test.js create mode 100644 ios/JSReloadObserver.swift create mode 100644 ios/Tests/JSReloadObserverTests.swift diff --git a/android/src/androidTest/java/com/comapeo/core/NodeJSIPCTest.kt b/android/src/androidTest/java/com/comapeo/core/NodeJSIPCTest.kt index 65ef888..3620493 100644 --- a/android/src/androidTest/java/com/comapeo/core/NodeJSIPCTest.kt +++ b/android/src/androidTest/java/com/comapeo/core/NodeJSIPCTest.kt @@ -288,6 +288,76 @@ class NodeJSIPCTest { Thread.sleep(500) } + /** + * `close()` is the synchronous teardown path used from + * `ComapeoCoreModule.OnDestroy` (the Expo lifecycle hook that fires on + * React Native JS context reload). 
The contract — distinct from
+     * `disconnect()` — is that the underlying socket FD is released on the
+     * calling thread before `close()` returns, so the peer (the Node.js
+     * backend over AF_UNIX) observes EOF before the next `OnCreate` opens a
+     * fresh connection. That ordering is what lets the backend's
+     * per-connection rpc-reflector cleanup run against the prior session
+     * rather than the new one.
+     *
+     * This test pins that contract: after `close()` returns, the server's
+     * blocking read on the same connection unblocks immediately with EOF
+     * (`-1` from `DataInputStream.read()`).
+     */
+    @Test
+    fun closeReleasesSocketSynchronously() {
+        val connected = CountDownLatch(1)
+        val readResult = java.util.concurrent.atomic.AtomicInteger(Int.MIN_VALUE)
+        val readReturned = CountDownLatch(1)
+
+        startMockServer { input, _ ->
+            connected.countDown()
+            // Block until the client closes its end. Without a
+            // synchronous close on the client, this read would only
+            // unblock once something finally tore down the orphaned
+            // socket: racy, and slow enough to be user-visible.
+            try {
+                val n = input.read()
+                readResult.set(n)
+            } catch (e: IOException) {
+                // Some kernels surface peer-close as IOException rather
+                // than EOF; either way the peer is gone — treat both
+                // as "peer closed".
+                readResult.set(-1)
+            }
+            readReturned.countDown()
+        }
+
+        val ipc = NodeJSIPC(socketFile) { msg -> receivedMessages.add(msg) }
+        assertTrue("Should connect within 10s", connected.await(10, TimeUnit.SECONDS))
+        // Let the IPC settle into Connected and start its receive loop
+        // before we close — otherwise we're testing a connect-cancel
+        // race rather than steady-state synchronous close.
+        Thread.sleep(200)
+
+        // The synchronous-close contract: the moment close() returns,
+        // the server's blocking read must already have observed EOF.
+        // We give the kernel a generous 1s window to deliver it — far
+        // longer than necessary on a local AF_UNIX socket but short
+        // enough that a regression to the old fire-and-forget close
+        // (which depends on the launched coroutine eventually running
+        // on Dispatchers.IO) shows up as a test failure.
+        ipc.close()
+
+        assertTrue(
+            "Server-side read must unblock with EOF within 1s of close() returning. " +
+                "Regression: if close() reverts to a launched-coroutine teardown, the " +
+                "FD release happens after the JS reload has already opened a new " +
+                "connection and the rpc-reflector cleanup runs against the wrong " +
+                "connection.",
+            readReturned.await(1, TimeUnit.SECONDS)
+        )
+        assertEquals(
+            "Server's read must return -1 (EOF) — peer closed cleanly.",
+            -1,
+            readResult.get()
+        )
+    }
+
     @Test
     fun handlesServerDisconnect() {
         val connected = CountDownLatch(1)
diff --git a/backend/lib/message-port.js b/backend/lib/message-port.js
index d77621d..5da64bf 100644
--- a/backend/lib/message-port.js
+++ b/backend/lib/message-port.js
@@ -100,5 +100,16 @@ export class SocketMessagePort extends TypedEmitter {
     this.#state = "closed";
     this.#queue.length = 0;
     this.#framedStream.destroy();
+    // Emit after `#state = "closed"` and `#framedStream.destroy()` so a
+    // listener that synchronously inspects state or writes a final
+    // postMessage observes the "closed" state and the destroyed
+    // stream. Consumers (notably `ComapeoRpcServer`, which wires
+    // `messagePort.on("close", () => server.close())` to remove every
+    // RPC event-listener subscription registered against
+    // `MapeoManager` for this connection) rely on this event firing
+    // exactly once when the underlying socket goes away — without it
+    // the rpc-reflector cleanup never runs, and listeners accumulate
+    // across client reconnects (e.g. on every React Native JS reload).
+    this.emit("close");
   }
 }
diff --git a/backend/package.json b/backend/package.json
index 7aec498..6552377 100644
--- a/backend/package.json
+++ b/backend/package.json
@@ -5,6 +5,7 @@
   "type": "module",
   "scripts": {
     "build": "rollup -c",
+    "test": "node --test \"test/*.test.js\"",
     "types": "tsc"
   },
   "keywords": [],
diff --git a/backend/test/message-port.test.js b/backend/test/message-port.test.js
new file mode 100644
index 0000000..010fe7b
--- /dev/null
+++ b/backend/test/message-port.test.js
@@ -0,0 +1,173 @@
+import { test } from "node:test";
+import assert from "node:assert/strict";
+import { Duplex } from "node:stream";
+import FramedStream from "framed-stream";
+import { Buffer } from "node:buffer";
+
+import { SocketMessagePort } from "../lib/message-port.js";
+
+/**
+ * Builds a pair of connected duplex streams to stand in for a real
+ * net.Socket pair. Anything written to `a` is readable from `b` and vice
+ * versa, and destroying either side propagates EOF + close to the peer
+ * — same as a real AF_UNIX socket pair, which is what the backend
+ * connects to in production.
+ *
+ * Hand-rolled (rather than using `net.createServer` over a temp socket
+ * file) so the test stays in-process, runs without filesystem
+ * permissions, and finishes in milliseconds.
+ *
+ * @returns {[Duplex, Duplex]}
+ */
+function pair() {
+  /** @type {Duplex} */
+  let a;
+  /** @type {Duplex} */
+  let b;
+  a = new Duplex({
+    read() {},
+    write(chunk, _enc, cb) {
+      b.push(chunk);
+      cb();
+    },
+    final(cb) {
+      b.push(null);
+      cb();
+    },
+    destroy(_err, cb) {
+      // Mirrors AF_UNIX behaviour: closing one side delivers EOF to
+      // the peer, which then sees its readable side end and closes.
+      if (!b.destroyed) b.destroy();
+      cb(null);
+    },
+  });
+  b = new Duplex({
+    read() {},
+    write(chunk, _enc, cb) {
+      a.push(chunk);
+      cb();
+    },
+    final(cb) {
+      a.push(null);
+      cb();
+    },
+    destroy(_err, cb) {
+      if (!a.destroyed) a.destroy();
+      cb(null);
+    },
+  });
+  return [a, b];
+}
+
+test("SocketMessagePort emits 'close' once when the underlying socket closes", async () => {
+  const [serverSide, clientSide] = pair();
+  const port = new SocketMessagePort(serverSide);
+  port.start();
+
+  let closeCount = 0;
+  port.on("close", () => {
+    closeCount++;
+  });
+
+  // Simulate the peer disconnecting (e.g. RN reload tears down the
+  // socket from the client side). The frame stream observes 'close'
+  // and our SocketMessagePort.close() must propagate it.
+  clientSide.destroy();
+
+  // Wait one event-loop turn for the close cascade to complete.
+ await new Promise((resolve) => setImmediate(resolve)); + + assert.equal(closeCount, 1, "close must fire exactly once"); +}); + +test("SocketMessagePort emits 'close' when close() is called directly", async () => { + const [serverSide] = pair(); + const port = new SocketMessagePort(serverSide); + port.start(); + + let closeCount = 0; + port.on("close", () => { + closeCount++; + }); + + port.close(); + // Calling close again must not emit another event — the close handler + // in ComapeoRpcServer attaches `server.close()` which is idempotent + // but we still don't want spurious double-fires. + port.close(); + + await new Promise((resolve) => setImmediate(resolve)); + + assert.equal(closeCount, 1, "close must be idempotent and fire at most once"); +}); + +test( + "ComapeoRpcServer-style wiring: handler listeners are removed when the socket closes", + async () => { + // This is the contract the backend relies on for stale-listener cleanup + // across RN reloads. We don't pull in @comapeo/ipc here — its + // createMapeoServer wraps rpc-reflector but the relevant invariant + // (server.close() removes listeners attached to the handler) is a + // property of rpc-reflector itself, exercised here directly so the + // test runs in isolation without the full Mapeo manager surface. + const { createServer } = await import("rpc-reflector"); + const { EventEmitter } = await import("node:events"); + + const handler = new EventEmitter(); + const [serverSide, clientSide] = pair(); + + const messagePort = new SocketMessagePort(serverSide); + messagePort.start(); + + // The shape ComapeoRpcServer constructs. + const server = createServer(handler, messagePort); + messagePort.on("close", () => server.close()); + + // Drive a subscription onto `handler` by sending an [ON, ...] frame + // through the SocketMessagePort, the same way an rpc-reflector + // client would after a `clientApi.on('progress', cb)` call. + // msgType.ON === 2 in rpc-reflector/lib/constants.js + // (REQUEST=0, RESPONSE=1, ON=2, OFF=3, EMIT=4). + const ON = 2; + const onFrame = Buffer.from(JSON.stringify([ON, "progress", []])); + const lengthPrefix = Buffer.alloc(4); + lengthPrefix.writeUInt32LE(onFrame.length, 0); + clientSide.write(Buffer.concat([lengthPrefix, onFrame])); + + // Give the server a tick to install its listener on `handler`. + await new Promise((resolve) => setImmediate(resolve)); + assert.equal( + handler.listenerCount("progress"), + 1, + "rpc-reflector should attach exactly one listener after [ON, ...]", + ); + + // Now simulate the client going away — what RN reload looks like + // from the backend's perspective once the IPC socket closes. + clientSide.destroy(); + await new Promise((resolve) => setImmediate(resolve)); + + assert.equal( + handler.listenerCount("progress"), + 0, + "listener must be removed once the socket closes (regression: " + + "without SocketMessagePort.emit('close'), the cleanup path " + + "in ComapeoRpcServer never runs and listeners leak)", + ); + }, +); + +// FramedStream is the one piece we lean on directly — make sure the +// test harness's `pair()` actually exercises it the way production does +// (length-prefixed framing, both sides observing close). 
+test("smoke: FramedStream wired through pair() round-trips a message", async () => { + const [a, b] = pair(); + const fa = new FramedStream(a); + const fb = new FramedStream(b); + + const received = new Promise((resolve) => fb.once("data", resolve)); + fa.write(Buffer.from("hello")); + + const buf = await received; + assert.equal(buf.toString(), "hello"); +}); diff --git a/ios/ComapeoCoreModule.swift b/ios/ComapeoCoreModule.swift index 030b55d..9dc1829 100644 --- a/ios/ComapeoCoreModule.swift +++ b/ios/ComapeoCoreModule.swift @@ -2,6 +2,13 @@ import ExpoModulesCore public class ComapeoCoreModule: Module { private var ipc: NodeJSIPC? + /// Observes RN-posted reload notifications and disconnects the IPC + /// when the JS context is about to be torn down. Required because + /// Expo's `OnDestroy` does not fire on iOS reload (expo/expo#33655), + /// so the lifecycle-only path that works on Android leaves stale + /// sockets attached on every reload here. See `JSReloadObserver` + /// for the full rationale and choice of notification names. + private var reloadObserver: JSReloadObserver? // MARK: - Testable seams // @@ -35,6 +42,18 @@ public class ComapeoCoreModule: Module { self?.sendEvent("message", ["data": message]) } + // Subscribe to RN's reload notifications so the IPC closes + // before a fresh JS context opens its own connection. This + // is the iOS analogue of Android's `OnDestroy { close() }` + // — necessary because Expo's `OnDestroy` does not fire on + // iOS JS reload (expo/expo#33655). `disconnect()` is + // already synchronous on iOS (`shutdown(2)` → join receive + // loop → `close(2)`), so the backend observes EOF before + // the notification handler returns. + self.reloadObserver = JSReloadObserver { [weak self] in + self?.ipc?.disconnect() + } + // Observe service state changes. `onStateChange` is a single-slot // callback — assigning here replaces any previous observer. In normal // app operation only one ComapeoCoreModule instance is alive at a time, @@ -65,6 +84,14 @@ public class ComapeoCoreModule: Module { } OnDestroy { + // OnDestroy is unreliable on iOS reload (expo/expo#33655) — + // the equivalent teardown is wired via `reloadObserver` + // above. This block still runs on the regular paths it + // does fire on (e.g. final module deinit during process + // exit) and is kept idempotent so the two paths can race + // without harm: `disconnect()` early-returns when state + // is already `.disconnecting`/`.disconnected`. + self.reloadObserver = nil self.ipc?.disconnect() self.ipc = nil } diff --git a/ios/JSReloadObserver.swift b/ios/JSReloadObserver.swift new file mode 100644 index 0000000..d75eeaa --- /dev/null +++ b/ios/JSReloadObserver.swift @@ -0,0 +1,76 @@ +import Foundation + +/// Observes the NSNotifications React Native posts when the JS runtime is +/// about to be torn down (developer reload, `DevSettings.reload()`, +/// fast-refresh full reload), and runs a caller-supplied closure on each +/// such event. +/// +/// Why this exists separately from Expo's `OnDestroy { ... }`: +/// `OnDestroy` does not fire on iOS when the JS context reloads — see +/// [expo/expo#33655](https://github.com/expo/expo/issues/33655). On +/// reload, Expo creates a fresh module instance via `OnCreate` but the +/// previous instance is leaked, with `OnDestroy` never invoked. 
That +/// leaves any IPC socket the previous instance owned still connected, +/// so the Node.js backend keeps emitting events to a peer that no +/// longer has anyone listening — and any rpc-reflector subscriptions +/// the previous JS session registered against `MapeoManager` stay +/// attached to the long-lived handler. +/// +/// Subscribing here gives us an iOS-side reload signal independent of +/// `OnDestroy`. Each living module instance disconnects its own IPC +/// when the notification fires, so the backend observes EOF on every +/// stale connection at the moment reload begins, regardless of whether +/// or when `OnDestroy` eventually runs. +/// +/// Notification choice: +/// - `RCTBridgeWillReloadNotification` is posted by RCTBridge in +/// classic-architecture iOS apps when a reload is initiated. +/// - `RCTJavaScriptWillStartLoadingNotification` is posted earlier in +/// the reload sequence on both architectures and is the most +/// reliable signal under bridgeless / new architecture, where +/// bridge-specific notifications are not always emitted. +/// +/// Both are observed; whichever lands first triggers the close. The +/// callback is idempotent on `NodeJSIPC` so a double-fire is harmless. +/// +/// The notification names are intentionally hard-coded as strings +/// rather than imported from React-Core so this file builds in the +/// macOS-only `ComapeoCore` Swift Package target (used by the +/// `ios/Tests/` suite). React isn't a dependency of that target. +final class JSReloadObserver { + + /// Notification names that React Native posts on JS context + /// teardown. Tests can override this to inject a synthetic name + /// when they don't want to drive a real RN notification. + static let defaultNotificationNames: [Notification.Name] = [ + Notification.Name("RCTBridgeWillReloadNotification"), + Notification.Name("RCTJavaScriptWillStartLoadingNotification"), + ] + + private let center: NotificationCenter + private var observers: [NSObjectProtocol] = [] + + init( + notificationNames: [Notification.Name] = JSReloadObserver.defaultNotificationNames, + center: NotificationCenter = .default, + onReload: @escaping () -> Void + ) { + self.center = center + // Run the callback synchronously on the posting thread (queue: nil) + // so the IPC disconnect completes before RCT continues with + // bridge teardown — same ordering guarantee Android's OnDestroy + // gives us, only achieved here via the notification rather than + // a lifecycle hook. + self.observers = notificationNames.map { name in + center.addObserver(forName: name, object: nil, queue: nil) { _ in + onReload() + } + } + } + + deinit { + for observer in observers { + center.removeObserver(observer) + } + } +} diff --git a/ios/Package.swift b/ios/Package.swift index 89cc43a..d75aaaf 100644 --- a/ios/Package.swift +++ b/ios/Package.swift @@ -25,6 +25,7 @@ let package = Package( "NodeJSIPC.swift", "NodeJSService.swift", "ControlFrame.swift", + "JSReloadObserver.swift", "Log.swift", ] ), diff --git a/ios/Tests/JSReloadObserverTests.swift b/ios/Tests/JSReloadObserverTests.swift new file mode 100644 index 0000000..3a1da47 --- /dev/null +++ b/ios/Tests/JSReloadObserverTests.swift @@ -0,0 +1,214 @@ +import XCTest +@testable import ComapeoCore + +/// Tests for `JSReloadObserver` and the IPC-disconnect-on-reload contract +/// it provides for `ComapeoCoreModule`. +/// +/// Background: on iOS, Expo's `OnDestroy` does not fire when the React +/// Native JS context reloads (expo/expo#33655). 
The module-side workaround +/// is to subscribe to RN's reload notifications and disconnect the IPC +/// from the notification handler. These tests pin both halves of that: +/// +/// 1. `JSReloadObserver` runs its callback synchronously on the posting +/// thread for every notification name it was constructed with, and +/// detaches cleanly on deinit so a leaked instance can't run after +/// its module is gone. +/// 2. When wired to a live `NodeJSIPC` (the same way `ComapeoCoreModule` +/// wires it), posting a reload notification disconnects the IPC and +/// a real socket peer (`MockNodeServer`) observes EOF. +final class JSReloadObserverTests: XCTestCase { + + /// Use a private NotificationCenter so concurrent tests can't + /// observe each other's posts. Also avoids leaking observers into + /// `.default` if a test fails before deinit clean-up runs. + private var center: NotificationCenter! + private var testDir: String! + + override func setUp() { + super.setUp() + center = NotificationCenter() + testDir = TestPaths.makeShortTempDir(prefix: "rlo") + } + + override func tearDown() { + TestPaths.removeTempDir(testDir) + center = nil + super.tearDown() + } + + // MARK: - Unit tests + + func testFiresCallbackForObservedNotification() { + let name = Notification.Name("test.reload") + var calls = 0 + let observer = JSReloadObserver( + notificationNames: [name], + center: center + ) { + calls += 1 + } + // Keep `observer` alive for the duration of the test (the deinit + // cleanup is exercised separately below). + _ = observer + + center.post(name: name, object: nil) + XCTAssertEqual(calls, 1, "Callback must run exactly once per post") + + center.post(name: name, object: nil) + XCTAssertEqual(calls, 2, "Subsequent posts must each fire the callback") + } + + func testIgnoresOtherNotifications() { + let observed = Notification.Name("test.observed") + let other = Notification.Name("test.other") + var calls = 0 + let observer = JSReloadObserver( + notificationNames: [observed], + center: center + ) { + calls += 1 + } + _ = observer + + center.post(name: other, object: nil) + XCTAssertEqual(calls, 0, "Callback must not run for unobserved names") + } + + func testFiresForAnyOfMultipleNames() { + let nameA = Notification.Name("test.a") + let nameB = Notification.Name("test.b") + var calls = 0 + let observer = JSReloadObserver( + notificationNames: [nameA, nameB], + center: center + ) { + calls += 1 + } + _ = observer + + center.post(name: nameA, object: nil) + center.post(name: nameB, object: nil) + XCTAssertEqual( + calls, 2, + "Each observed name must independently trigger the callback" + ) + } + + func testDeinitDetachesObserver() { + let name = Notification.Name("test.deinit") + var calls = 0 + autoreleasepool { + let observer = JSReloadObserver( + notificationNames: [name], + center: center + ) { + calls += 1 + } + _ = observer + // Observer goes out of scope at autoreleasepool exit. + } + + center.post(name: name, object: nil) + XCTAssertEqual( + calls, 0, + "After deinit the observer must no longer fire — otherwise " + + "leaked module instances on iOS reload would each retain a live " + + "callback into a torn-down ipc reference." + ) + } + + func testCallbackRunsOnPostingThread() { + // Module callers rely on synchronous teardown (ipc.disconnect is + // synchronous on iOS) so that the backend observes EOF before + // RN continues with bridge teardown. Confirm the callback isn't + // hopped to a background queue. 
+ let name = Notification.Name("test.thread") + let postingThread = Thread.current + var callbackThread: Thread? + let observer = JSReloadObserver( + notificationNames: [name], + center: center + ) { + callbackThread = Thread.current + } + _ = observer + + center.post(name: name, object: nil) + XCTAssertEqual( + callbackThread, postingThread, + "Callback must run on the posting thread (queue: nil)" + ) + } + + func testDefaultNotificationNamesIncludesReactNativeReloadNames() { + // Sanity-check the production wiring picks up the names RN + // actually posts. Hard-coded as strings on purpose (the React-Core + // header isn't available in the macOS swift-test target). + let names = JSReloadObserver.defaultNotificationNames.map { $0.rawValue } + XCTAssertTrue( + names.contains("RCTBridgeWillReloadNotification"), + "Old-architecture reload notification must be observed by default" + ) + XCTAssertTrue( + names.contains("RCTJavaScriptWillStartLoadingNotification"), + "Bridgeless / new-architecture reload notification must be observed by default" + ) + } + + // MARK: - Integration with NodeJSIPC + + /// End-to-end behavioural test for the wiring `ComapeoCoreModule` + /// installs in `OnCreate`: a `JSReloadObserver` whose callback + /// disconnects a `NodeJSIPC` connected to a real Unix-domain + /// socket peer. Posting the reload notification must disconnect the + /// client and the server must observe EOF on the next read. + func testReloadNotificationDisconnectsLiveIPC() throws { + let socketPath = (testDir as NSString).appendingPathComponent("reload.sock") + let server = MockNodeServer(socketPath: socketPath) + try server.start() + defer { server.stop() } + + let ipc = NodeJSIPC(socketPath: socketPath) { _ in } + let observer = JSReloadObserver( + notificationNames: [Notification.Name("test.reload")], + center: center + ) { [weak ipc] in + ipc?.disconnect() + } + _ = observer + + // Wait for the IPC to reach .connected before posting the + // notification — otherwise the disconnect races the connect + // and we can't distinguish "reload caused disconnect" from + // "connect never landed". + let serverFd = server.acceptClient() + XCTAssertGreaterThanOrEqual(serverFd, 0) + defer { if serverFd >= 0 { close(serverFd) } } + waitUntil("ipc connects", ipc.state == .connected) + + // Fire the reload notification — same thing + // RCTBridgeWillReloadNotification would do at runtime. + center.post(name: Notification.Name("test.reload"), object: nil) + + // Synchronous on iOS: by the time post() returns, disconnect() + // has run shutdown(2) → join receive → close(2). State must + // already be .disconnected. + XCTAssertEqual( + ipc.state, .disconnected, + "IPC must be disconnected by the time the notification post returns " + + "(iOS NodeJSIPC.disconnect is synchronous)" + ) + + // The server end of the socket should now read EOF (0 bytes) + // — that's the signal the backend's SocketMessagePort uses to + // emit 'close' and drive rpc-reflector subscription cleanup. + var byte: UInt8 = 0 + let n = read(serverFd, &byte, 1) + XCTAssertEqual( + n, 0, + "Server-side read must return 0 (EOF) after the client " + + "disconnects — that's what triggers the backend's per-connection " + + "rpc-reflector cleanup." 
+ ) + } +} From fd10d19d1c8cbdeb0e320471327364eab5a4845e Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 29 Apr 2026 22:47:22 +0000 Subject: [PATCH 3/5] =?UTF-8?q?ios:=20remove=20JSReloadObserver=20?= =?UTF-8?q?=E2=80=94=20verified=20upstream=20fix=20is=20in=20SDK=2055?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Direct verification of the installed expo-modules-core@55.0.23 source (via `npm pack` + extract) confirms PR #33760's fix is present: package/ios/Core/MainValueConverter.swift:7 private(set) weak var appContext: AppContext? package/ios/Core/ModuleHolder.swift:140 deinit { post(event: .moduleDestroy) } package/ios/Core/AppContext.swift weak var reactBridge: RCTBridge? _runtime didSet → releaseRuntimeObjects() on nil deinit posts .appContextDestroys With the strong-ref cycle through MainValueConverter broken, the old AppContext is no longer pinned across reloads. On every JS reload it deinits, releases the module registry, every ModuleHolder deinits, each fires .moduleDestroy, every module's OnDestroy hook runs. The previous commit's JSReloadObserver was correct for pre-#33760 SDKs but is redundant against the version actually shipped here. Removing it reduces the number of teardown paths to one per platform (OnDestroy on both Android and iOS) and avoids confusing future maintainers with two mechanisms that mean the same thing. The OnDestroy comment now documents the verification and points at the RCT-notification fallback that should be reintroduced if a future SDK upgrade or other strong-ref leak reintroduces the bug. What stays from the previous commit: - backend SocketMessagePort.emit('close') fix (load-bearing — rpc-reflector cleanup runs from this event) - backend tests - android NodeJSIPC.close() instrumented test Co-Authored-By: Claude Opus 4.7 --- ios/ComapeoCoreModule.swift | 52 +++---- ios/JSReloadObserver.swift | 76 --------- ios/Package.swift | 1 - ios/Tests/JSReloadObserverTests.swift | 214 -------------------------- 4 files changed, 25 insertions(+), 318 deletions(-) delete mode 100644 ios/JSReloadObserver.swift delete mode 100644 ios/Tests/JSReloadObserverTests.swift diff --git a/ios/ComapeoCoreModule.swift b/ios/ComapeoCoreModule.swift index 9dc1829..773b048 100644 --- a/ios/ComapeoCoreModule.swift +++ b/ios/ComapeoCoreModule.swift @@ -2,13 +2,6 @@ import ExpoModulesCore public class ComapeoCoreModule: Module { private var ipc: NodeJSIPC? - /// Observes RN-posted reload notifications and disconnects the IPC - /// when the JS context is about to be torn down. Required because - /// Expo's `OnDestroy` does not fire on iOS reload (expo/expo#33655), - /// so the lifecycle-only path that works on Android leaves stale - /// sockets attached on every reload here. See `JSReloadObserver` - /// for the full rationale and choice of notification names. - private var reloadObserver: JSReloadObserver? // MARK: - Testable seams // @@ -42,18 +35,6 @@ public class ComapeoCoreModule: Module { self?.sendEvent("message", ["data": message]) } - // Subscribe to RN's reload notifications so the IPC closes - // before a fresh JS context opens its own connection. This - // is the iOS analogue of Android's `OnDestroy { close() }` - // — necessary because Expo's `OnDestroy` does not fire on - // iOS JS reload (expo/expo#33655). `disconnect()` is - // already synchronous on iOS (`shutdown(2)` → join receive - // loop → `close(2)`), so the backend observes EOF before - // the notification handler returns. 
- self.reloadObserver = JSReloadObserver { [weak self] in - self?.ipc?.disconnect() - } - // Observe service state changes. `onStateChange` is a single-slot // callback — assigning here replaces any previous observer. In normal // app operation only one ComapeoCoreModule instance is alive at a time, @@ -84,14 +65,31 @@ public class ComapeoCoreModule: Module { } OnDestroy { - // OnDestroy is unreliable on iOS reload (expo/expo#33655) — - // the equivalent teardown is wired via `reloadObserver` - // above. This block still runs on the regular paths it - // does fire on (e.g. final module deinit during process - // exit) and is kept idempotent so the two paths can race - // without harm: `disconnect()` early-returns when state - // is already `.disconnecting`/`.disconnected`. - self.reloadObserver = nil + // OnDestroy now fires reliably on iOS JS reload as of + // expo-modules-core's PR #33760 (merged Dec 2024, in SDK + // 53+). That PR made `MainValueConverter.appContext` weak, + // breaking the strong-reference cycle that previously + // pinned `AppContext` across reloads — verified locally + // against the installed `expo-modules-core@55.0.23`: + // ios/Core/MainValueConverter.swift:7 + // `private(set) weak var appContext: AppContext?` + // ios/Core/ModuleHolder.swift:140 + // `deinit { post(event: .moduleDestroy) }` + // With the cycle broken, AppContext deinits on reload, the + // module registry releases each ModuleHolder, and every + // ModuleHolder's deinit fires .moduleDestroy → this block. + // `disconnect()` is already synchronous on iOS + // (`shutdown(2)` → join receive loop → `close(2)`), so the + // backend observes EOF before the OnDestroy block returns + // and the rpc-reflector subscription cleanup runs against + // the prior session's connection. + // + // If a future SDK upgrade reintroduces the leak, the + // workaround is to add an NSNotificationCenter observer + // for `RCTBridgeWillReloadNotification` / + // `RCTJavaScriptWillStartLoadingNotification` in OnCreate + // and disconnect from there. Keep it simple here unless + // there's evidence we need it. self.ipc?.disconnect() self.ipc = nil } diff --git a/ios/JSReloadObserver.swift b/ios/JSReloadObserver.swift deleted file mode 100644 index d75eeaa..0000000 --- a/ios/JSReloadObserver.swift +++ /dev/null @@ -1,76 +0,0 @@ -import Foundation - -/// Observes the NSNotifications React Native posts when the JS runtime is -/// about to be torn down (developer reload, `DevSettings.reload()`, -/// fast-refresh full reload), and runs a caller-supplied closure on each -/// such event. -/// -/// Why this exists separately from Expo's `OnDestroy { ... }`: -/// `OnDestroy` does not fire on iOS when the JS context reloads — see -/// [expo/expo#33655](https://github.com/expo/expo/issues/33655). On -/// reload, Expo creates a fresh module instance via `OnCreate` but the -/// previous instance is leaked, with `OnDestroy` never invoked. That -/// leaves any IPC socket the previous instance owned still connected, -/// so the Node.js backend keeps emitting events to a peer that no -/// longer has anyone listening — and any rpc-reflector subscriptions -/// the previous JS session registered against `MapeoManager` stay -/// attached to the long-lived handler. -/// -/// Subscribing here gives us an iOS-side reload signal independent of -/// `OnDestroy`. 
Each living module instance disconnects its own IPC -/// when the notification fires, so the backend observes EOF on every -/// stale connection at the moment reload begins, regardless of whether -/// or when `OnDestroy` eventually runs. -/// -/// Notification choice: -/// - `RCTBridgeWillReloadNotification` is posted by RCTBridge in -/// classic-architecture iOS apps when a reload is initiated. -/// - `RCTJavaScriptWillStartLoadingNotification` is posted earlier in -/// the reload sequence on both architectures and is the most -/// reliable signal under bridgeless / new architecture, where -/// bridge-specific notifications are not always emitted. -/// -/// Both are observed; whichever lands first triggers the close. The -/// callback is idempotent on `NodeJSIPC` so a double-fire is harmless. -/// -/// The notification names are intentionally hard-coded as strings -/// rather than imported from React-Core so this file builds in the -/// macOS-only `ComapeoCore` Swift Package target (used by the -/// `ios/Tests/` suite). React isn't a dependency of that target. -final class JSReloadObserver { - - /// Notification names that React Native posts on JS context - /// teardown. Tests can override this to inject a synthetic name - /// when they don't want to drive a real RN notification. - static let defaultNotificationNames: [Notification.Name] = [ - Notification.Name("RCTBridgeWillReloadNotification"), - Notification.Name("RCTJavaScriptWillStartLoadingNotification"), - ] - - private let center: NotificationCenter - private var observers: [NSObjectProtocol] = [] - - init( - notificationNames: [Notification.Name] = JSReloadObserver.defaultNotificationNames, - center: NotificationCenter = .default, - onReload: @escaping () -> Void - ) { - self.center = center - // Run the callback synchronously on the posting thread (queue: nil) - // so the IPC disconnect completes before RCT continues with - // bridge teardown — same ordering guarantee Android's OnDestroy - // gives us, only achieved here via the notification rather than - // a lifecycle hook. - self.observers = notificationNames.map { name in - center.addObserver(forName: name, object: nil, queue: nil) { _ in - onReload() - } - } - } - - deinit { - for observer in observers { - center.removeObserver(observer) - } - } -} diff --git a/ios/Package.swift b/ios/Package.swift index d75aaaf..89cc43a 100644 --- a/ios/Package.swift +++ b/ios/Package.swift @@ -25,7 +25,6 @@ let package = Package( "NodeJSIPC.swift", "NodeJSService.swift", "ControlFrame.swift", - "JSReloadObserver.swift", "Log.swift", ] ), diff --git a/ios/Tests/JSReloadObserverTests.swift b/ios/Tests/JSReloadObserverTests.swift deleted file mode 100644 index 3a1da47..0000000 --- a/ios/Tests/JSReloadObserverTests.swift +++ /dev/null @@ -1,214 +0,0 @@ -import XCTest -@testable import ComapeoCore - -/// Tests for `JSReloadObserver` and the IPC-disconnect-on-reload contract -/// it provides for `ComapeoCoreModule`. -/// -/// Background: on iOS, Expo's `OnDestroy` does not fire when the React -/// Native JS context reloads (expo/expo#33655). The module-side workaround -/// is to subscribe to RN's reload notifications and disconnect the IPC -/// from the notification handler. These tests pin both halves of that: -/// -/// 1. `JSReloadObserver` runs its callback synchronously on the posting -/// thread for every notification name it was constructed with, and -/// detaches cleanly on deinit so a leaked instance can't run after -/// its module is gone. -/// 2. 
When wired to a live `NodeJSIPC` (the same way `ComapeoCoreModule` -/// wires it), posting a reload notification disconnects the IPC and -/// a real socket peer (`MockNodeServer`) observes EOF. -final class JSReloadObserverTests: XCTestCase { - - /// Use a private NotificationCenter so concurrent tests can't - /// observe each other's posts. Also avoids leaking observers into - /// `.default` if a test fails before deinit clean-up runs. - private var center: NotificationCenter! - private var testDir: String! - - override func setUp() { - super.setUp() - center = NotificationCenter() - testDir = TestPaths.makeShortTempDir(prefix: "rlo") - } - - override func tearDown() { - TestPaths.removeTempDir(testDir) - center = nil - super.tearDown() - } - - // MARK: - Unit tests - - func testFiresCallbackForObservedNotification() { - let name = Notification.Name("test.reload") - var calls = 0 - let observer = JSReloadObserver( - notificationNames: [name], - center: center - ) { - calls += 1 - } - // Keep `observer` alive for the duration of the test (the deinit - // cleanup is exercised separately below). - _ = observer - - center.post(name: name, object: nil) - XCTAssertEqual(calls, 1, "Callback must run exactly once per post") - - center.post(name: name, object: nil) - XCTAssertEqual(calls, 2, "Subsequent posts must each fire the callback") - } - - func testIgnoresOtherNotifications() { - let observed = Notification.Name("test.observed") - let other = Notification.Name("test.other") - var calls = 0 - let observer = JSReloadObserver( - notificationNames: [observed], - center: center - ) { - calls += 1 - } - _ = observer - - center.post(name: other, object: nil) - XCTAssertEqual(calls, 0, "Callback must not run for unobserved names") - } - - func testFiresForAnyOfMultipleNames() { - let nameA = Notification.Name("test.a") - let nameB = Notification.Name("test.b") - var calls = 0 - let observer = JSReloadObserver( - notificationNames: [nameA, nameB], - center: center - ) { - calls += 1 - } - _ = observer - - center.post(name: nameA, object: nil) - center.post(name: nameB, object: nil) - XCTAssertEqual( - calls, 2, - "Each observed name must independently trigger the callback" - ) - } - - func testDeinitDetachesObserver() { - let name = Notification.Name("test.deinit") - var calls = 0 - autoreleasepool { - let observer = JSReloadObserver( - notificationNames: [name], - center: center - ) { - calls += 1 - } - _ = observer - // Observer goes out of scope at autoreleasepool exit. - } - - center.post(name: name, object: nil) - XCTAssertEqual( - calls, 0, - "After deinit the observer must no longer fire — otherwise " + - "leaked module instances on iOS reload would each retain a live " + - "callback into a torn-down ipc reference." - ) - } - - func testCallbackRunsOnPostingThread() { - // Module callers rely on synchronous teardown (ipc.disconnect is - // synchronous on iOS) so that the backend observes EOF before - // RN continues with bridge teardown. Confirm the callback isn't - // hopped to a background queue. - let name = Notification.Name("test.thread") - let postingThread = Thread.current - var callbackThread: Thread? 
- let observer = JSReloadObserver( - notificationNames: [name], - center: center - ) { - callbackThread = Thread.current - } - _ = observer - - center.post(name: name, object: nil) - XCTAssertEqual( - callbackThread, postingThread, - "Callback must run on the posting thread (queue: nil)" - ) - } - - func testDefaultNotificationNamesIncludesReactNativeReloadNames() { - // Sanity-check the production wiring picks up the names RN - // actually posts. Hard-coded as strings on purpose (the React-Core - // header isn't available in the macOS swift-test target). - let names = JSReloadObserver.defaultNotificationNames.map { $0.rawValue } - XCTAssertTrue( - names.contains("RCTBridgeWillReloadNotification"), - "Old-architecture reload notification must be observed by default" - ) - XCTAssertTrue( - names.contains("RCTJavaScriptWillStartLoadingNotification"), - "Bridgeless / new-architecture reload notification must be observed by default" - ) - } - - // MARK: - Integration with NodeJSIPC - - /// End-to-end behavioural test for the wiring `ComapeoCoreModule` - /// installs in `OnCreate`: a `JSReloadObserver` whose callback - /// disconnects a `NodeJSIPC` connected to a real Unix-domain - /// socket peer. Posting the reload notification must disconnect the - /// client and the server must observe EOF on the next read. - func testReloadNotificationDisconnectsLiveIPC() throws { - let socketPath = (testDir as NSString).appendingPathComponent("reload.sock") - let server = MockNodeServer(socketPath: socketPath) - try server.start() - defer { server.stop() } - - let ipc = NodeJSIPC(socketPath: socketPath) { _ in } - let observer = JSReloadObserver( - notificationNames: [Notification.Name("test.reload")], - center: center - ) { [weak ipc] in - ipc?.disconnect() - } - _ = observer - - // Wait for the IPC to reach .connected before posting the - // notification — otherwise the disconnect races the connect - // and we can't distinguish "reload caused disconnect" from - // "connect never landed". - let serverFd = server.acceptClient() - XCTAssertGreaterThanOrEqual(serverFd, 0) - defer { if serverFd >= 0 { close(serverFd) } } - waitUntil("ipc connects", ipc.state == .connected) - - // Fire the reload notification — same thing - // RCTBridgeWillReloadNotification would do at runtime. - center.post(name: Notification.Name("test.reload"), object: nil) - - // Synchronous on iOS: by the time post() returns, disconnect() - // has run shutdown(2) → join receive → close(2). State must - // already be .disconnected. - XCTAssertEqual( - ipc.state, .disconnected, - "IPC must be disconnected by the time the notification post returns " + - "(iOS NodeJSIPC.disconnect is synchronous)" - ) - - // The server end of the socket should now read EOF (0 bytes) - // — that's the signal the backend's SocketMessagePort uses to - // emit 'close' and drive rpc-reflector subscription cleanup. - var byte: UInt8 = 0 - let n = read(serverFd, &byte, 1) - XCTAssertEqual( - n, 0, - "Server-side read must return 0 (EOF) after the client " + - "disconnects — that's what triggers the backend's per-connection " + - "rpc-reflector cleanup." 
- ) - } -} From 790d0aaf56fb6a6ceb8b380aa368a995c84b6de2 Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 29 Apr 2026 22:56:25 +0000 Subject: [PATCH 4/5] =?UTF-8?q?ios:=20clarify=20OnDestroy=20comment=20?= =?UTF-8?q?=E2=80=94=20avoid=20ambiguous=20"broken"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous wording said the strong-reference cycle was "broken" — which read as "buggy / non-functional" rather than the intended "severed / no longer exists". Reworded to spell out the cause-and-effect in plain English: cycle → AppContext pinned → ModuleHolders pinned → deinit chain never runs → OnDestroy never fires; PR #33760 makes the reference weak so the cycle goes away and the chain runs as expected. No code change. Co-Authored-By: Claude Opus 4.7 --- ios/ComapeoCoreModule.swift | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/ios/ComapeoCoreModule.swift b/ios/ComapeoCoreModule.swift index 773b048..b57591a 100644 --- a/ios/ComapeoCoreModule.swift +++ b/ios/ComapeoCoreModule.swift @@ -65,31 +65,40 @@ public class ComapeoCoreModule: Module { } OnDestroy { - // OnDestroy now fires reliably on iOS JS reload as of - // expo-modules-core's PR #33760 (merged Dec 2024, in SDK - // 53+). That PR made `MainValueConverter.appContext` weak, - // breaking the strong-reference cycle that previously - // pinned `AppContext` across reloads — verified locally - // against the installed `expo-modules-core@55.0.23`: + // OnDestroy fires reliably on iOS JS reload as of + // expo-modules-core's PR #33760 (merged Dec 2024, shipped in + // SDK 53+). Previously, a strong-reference cycle through + // `MainValueConverter` kept `AppContext` alive across + // reloads — and as long as `AppContext` stayed alive, its + // `ModuleHolder`s stayed alive, their deinits never ran, + // and the `.moduleDestroy` event that triggers OnDestroy + // was never posted. PR #33760 changed + // `MainValueConverter.appContext` to a weak reference, + // which removes the cycle so `AppContext` can be released + // on reload. Verified against the installed + // `expo-modules-core@55.0.23`: // ios/Core/MainValueConverter.swift:7 // `private(set) weak var appContext: AppContext?` // ios/Core/ModuleHolder.swift:140 // `deinit { post(event: .moduleDestroy) }` - // With the cycle broken, AppContext deinits on reload, the - // module registry releases each ModuleHolder, and every - // ModuleHolder's deinit fires .moduleDestroy → this block. + // On reload: AppContext deinits → its module registry + // releases each ModuleHolder → each ModuleHolder's deinit + // fires `.moduleDestroy` → this block runs. + // // `disconnect()` is already synchronous on iOS // (`shutdown(2)` → join receive loop → `close(2)`), so the // backend observes EOF before the OnDestroy block returns // and the rpc-reflector subscription cleanup runs against // the prior session's connection. // - // If a future SDK upgrade reintroduces the leak, the - // workaround is to add an NSNotificationCenter observer - // for `RCTBridgeWillReloadNotification` / + // If a future SDK upgrade or a third-party module + // reintroduces a strong reference that pins `AppContext`, + // the fallback is to subscribe to + // `RCTBridgeWillReloadNotification` / // `RCTJavaScriptWillStartLoadingNotification` in OnCreate - // and disconnect from there. Keep it simple here unless - // there's evidence we need it. + // and call `disconnect()` from the notification handler. 
+ // We don't pre-emptively wire that here — only one + // teardown path makes the lifecycle easier to reason about. self.ipc?.disconnect() self.ipc = nil } From 6b776e7a215c9de2f94784780f922ecadaa896be Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 30 Apr 2026 14:49:08 +0000 Subject: [PATCH 5/5] ci: wait for emulator services to be ready post-snapshot MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces the `sys.boot_completed`-only readiness check with a probe that actually exercises the services we're about to call. On the flaky run that motivated this: ##[command] adb wait-for-device shell 'while [[ -z $(getprop sys.boot_completed) ]]; do sleep 1; done' ##[command] adb shell settings put global window_animation_scale 0.0 cmd: Failure calling service settings: Broken pipe (32) ##[error] The process '/usr/bin/sh' failed with exit code 224 `sys.boot_completed` flipped as soon as zygote was alive — which on a snapshot restore happens before `system_server` finishes binding the `settings`, `package`, and `activity` services. The wait-for-device loop returned in ~14ms because the property was already set, and the very next `settings put` crashed with `Broken pipe (32)`. The new `wait_for_emulator` helper polls until all three of: - `sys.boot_completed == 1` - `pm path android` returns successfully (package manager bound) - `cmd settings list global` returns successfully (settings service bound) are true, with a 120s hard ceiling and a debug dump of `getprop | grep boot|svc` if it times out. One retry on each `settings put` covers a residual race where the probe sees `list` succeed but a write transaction loses to first-time initialisation. `disable-animations: false` is unchanged — emulator-runner's own `input keyevent 82` path still crashes the emulator on this image, which is the original reason the workflow drives the settings directly. Co-Authored-By: Claude Opus 4.7 --- .github/workflows/android-tests.yml | 57 ++++++++++++++++++++++++----- 1 file changed, 47 insertions(+), 10 deletions(-) diff --git a/.github/workflows/android-tests.yml b/.github/workflows/android-tests.yml index fbb45f5..0e7ea67 100644 --- a/.github/workflows/android-tests.yml +++ b/.github/workflows/android-tests.yml @@ -194,16 +194,53 @@ jobs: -noaudio -no-boot-anim disable-animations: false script: | - # Wait for system services after snapshot restore — - # sys.boot_completed can flip before the `input` service publishes, - # which makes emulator-runner's own `input keyevent 82` crash the - # emulator. We run this loop ourselves and skip the keyevent - # (disable-animations: false). - adb wait-for-device shell 'while [[ -z $(getprop sys.boot_completed) ]]; do sleep 1; done' - # Disable animations via settings (more reliable than input keyevent) - adb shell settings put global window_animation_scale 0.0 - adb shell settings put global transition_animation_scale 0.0 - adb shell settings put global animator_duration_scale 0.0 + # Post-snapshot service-readiness wait. + # + # `sys.boot_completed` flips as soon as zygote is alive, which on + # a snapshot restore happens before `system_server` finishes + # binding the `settings`, `package`, and `activity` services. + # The previous wait-for-device + `getprop` loop returned almost + # instantly because the property was already set, and the very + # next `adb shell settings put` then crashed with + # `cmd: Failure calling service settings: Broken pipe (32)` and + # exit-code 224 — the failure mode this is fixing. 
+ # + # We poll each service we're about to call until it actually + # responds, with a hard 120s ceiling. Single retry on each + # `settings put` afterwards covers any final transient hiccup. + wait_for_emulator() { + local deadline=$((SECONDS + 120)) + while [[ $SECONDS -lt $deadline ]]; do + # adb 'tr -d \r' strips the CRLF Windows-style line endings + # the device sends back, otherwise the [[ == "1" ]] check + # fails on a stray \r. + bc=$(adb shell getprop sys.boot_completed 2>/dev/null | tr -d '\r') + if [[ "$bc" == "1" ]] \ + && adb shell 'pm path android' >/dev/null 2>&1 \ + && adb shell 'cmd settings list global' >/dev/null 2>&1; then + return 0 + fi + sleep 1 + done + echo "::error::Emulator services did not become ready within 120s" >&2 + adb shell getprop | grep -E 'boot|svc' >&2 || true + return 1 + } + wait_for_emulator + # Disable animations via settings (more reliable than input + # keyevent — emulator-runner's keyevent path crashes on some + # snapshots, which is why disable-animations is left at + # `false` and we drive the settings here directly). + # + # One retry on each `settings put` because the service can + # still hiccup briefly even after the readiness probe passes + # (the probe verifies it answers a `list`, but `put` opens a + # write transaction which can race with first-time + # initialisation). + for s in window_animation_scale transition_animation_scale animator_duration_scale; do + adb shell settings put global "$s" 0.0 \ + || { sleep 2; adb shell settings put global "$s" 0.0; } + done # -i prints per-test STARTED/PASSED/FAILED so a hanging test is # identifiable cd apps/example/android && ./gradlew :comapeo-core-react-native:connectedDebugAndroidTest :app:connectedDebugAndroidTest --no-daemon -i -PreactNativeArchitectures=x86_64