diff --git a/ios/RCTWebRTC/AudioDeviceModuleObserver.h b/ios/RCTWebRTC/AudioDeviceModuleObserver.h
new file mode 100644
index 000000000..b163fb6c4
--- /dev/null
+++ b/ios/RCTWebRTC/AudioDeviceModuleObserver.h
@@ -0,0 +1,12 @@
+#import <Foundation/Foundation.h>
+#import "WebRTCModule.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+@interface AudioDeviceModuleObserver : NSObject <RTCAudioDeviceModuleDelegate>
+
+- (instancetype)initWithWebRTCModule:(WebRTCModule *)module;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/ios/RCTWebRTC/AudioDeviceModuleObserver.m b/ios/RCTWebRTC/AudioDeviceModuleObserver.m
new file mode 100644
index 000000000..53318726d
--- /dev/null
+++ b/ios/RCTWebRTC/AudioDeviceModuleObserver.m
@@ -0,0 +1,184 @@
+#import "AudioDeviceModuleObserver.h"
+#import <React/RCTLog.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+@interface AudioDeviceModuleObserver ()
+
+@property(weak, nonatomic) WebRTCModule *module;
+
+@end
+
+@implementation AudioDeviceModuleObserver
+
+- (instancetype)initWithWebRTCModule:(WebRTCModule *)module {
+    self = [super init];
+    if (self) {
+        self.module = module;
+        RCTLog(@"[AudioDeviceModuleObserver] Initialized observer: %@ for module: %@", self, module);
+    }
+    return self;
+}
+
+#pragma mark - RTCAudioDeviceModuleDelegate
+
+- (void)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule
+    didReceiveSpeechActivityEvent:(RTCSpeechActivityEvent)speechActivityEvent {
+    NSString *eventType = speechActivityEvent == RTCSpeechActivityEventStarted ?
@"started" : @"ended"; + + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleSpeechActivity + body:@{ + @"event" : eventType, + }]; + } + + RCTLog(@"[AudioDeviceModuleObserver] Speech activity event: %@", eventType); +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule didCreateEngine:(AVAudioEngine *)engine { + RCTLog(@"[AudioDeviceModuleObserver] Engine created"); + + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleEngineCreated body:@{}]; + } + + return 0; // Success +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule + willEnableEngine:(AVAudioEngine *)engine + isPlayoutEnabled:(BOOL)isPlayoutEnabled + isRecordingEnabled:(BOOL)isRecordingEnabled { + RCTLog(@"[AudioDeviceModuleObserver] Engine will enable - playout: %d, recording: %d", + isPlayoutEnabled, + isRecordingEnabled); + + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleEngineWillEnable + body:@{ + @"isPlayoutEnabled" : @(isPlayoutEnabled), + @"isRecordingEnabled" : @(isRecordingEnabled), + }]; + } + + return 0; // Success +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule + willStartEngine:(AVAudioEngine *)engine + isPlayoutEnabled:(BOOL)isPlayoutEnabled + isRecordingEnabled:(BOOL)isRecordingEnabled { + RCTLog(@"[AudioDeviceModuleObserver] Engine will start - playout: %d, recording: %d", + isPlayoutEnabled, + isRecordingEnabled); + + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleEngineWillStart + body:@{ + @"isPlayoutEnabled" : @(isPlayoutEnabled), + @"isRecordingEnabled" : @(isRecordingEnabled), + }]; + } + + return 0; // Success +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule + didStopEngine:(AVAudioEngine *)engine + isPlayoutEnabled:(BOOL)isPlayoutEnabled + isRecordingEnabled:(BOOL)isRecordingEnabled { + 
RCTLog(@"[AudioDeviceModuleObserver] Engine did stop - playout: %d, recording: %d", + isPlayoutEnabled, + isRecordingEnabled); + + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleEngineDidStop + body:@{ + @"isPlayoutEnabled" : @(isPlayoutEnabled), + @"isRecordingEnabled" : @(isRecordingEnabled), + }]; + } + + return 0; // Success +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule + didDisableEngine:(AVAudioEngine *)engine + isPlayoutEnabled:(BOOL)isPlayoutEnabled + isRecordingEnabled:(BOOL)isRecordingEnabled { + RCTLog(@"[AudioDeviceModuleObserver] Engine did disable - playout: %d, recording: %d", + isPlayoutEnabled, + isRecordingEnabled); + + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleEngineDidDisable + body:@{ + @"isPlayoutEnabled" : @(isPlayoutEnabled), + @"isRecordingEnabled" : @(isRecordingEnabled), + }]; + } + + return 0; // Success +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule willReleaseEngine:(AVAudioEngine *)engine { + RCTLog(@"[AudioDeviceModuleObserver] Engine will release"); + + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleEngineWillRelease body:@{}]; + } + + return 0; // Success +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule + engine:(AVAudioEngine *)engine + configureInputFromSource:(nullable AVAudioNode *)source + toDestination:(AVAudioNode *)destination + withFormat:(AVAudioFormat *)format + context:(NSDictionary *)context { + RCTLog(@"[AudioDeviceModuleObserver] Configure input - format: %@", format); + return 0; +} + +- (NSInteger)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule + engine:(AVAudioEngine *)engine + configureOutputFromSource:(AVAudioNode *)source + toDestination:(nullable AVAudioNode *)destination + withFormat:(AVAudioFormat *)format + context:(NSDictionary *)context { + 
RCTLog(@"[AudioDeviceModuleObserver] Configure output - format: %@", format); + return 0; +} + +- (void)audioDeviceModuleDidUpdateDevices:(RTCAudioDeviceModule *)audioDeviceModule { + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleDevicesUpdated body:@{}]; + } + + RCTLog(@"[AudioDeviceModuleObserver] Devices updated"); +} + +- (void)audioDeviceModule:(RTCAudioDeviceModule *)audioDeviceModule + didUpdateAudioProcessingState:(RTCAudioProcessingState)state { + if (self.module.bridge != nil) { + [self.module sendEventWithName:kEventAudioDeviceModuleAudioProcessingStateUpdated + body:@{ + @"voiceProcessingEnabled" : @(state.voiceProcessingEnabled), + @"voiceProcessingBypassed" : @(state.voiceProcessingBypassed), + @"voiceProcessingAGCEnabled" : @(state.voiceProcessingAGCEnabled), + @"stereoPlayoutEnabled" : @(state.stereoPlayoutEnabled), + }]; + } + + RCTLog(@"[AudioDeviceModuleObserver] Audio processing state updated - VP enabled: %d, VP bypassed: %d, AGC enabled: %d, stereo: %d", + state.voiceProcessingEnabled, + state.voiceProcessingBypassed, + state.voiceProcessingAGCEnabled, + state.stereoPlayoutEnabled); +} + +@end + +NS_ASSUME_NONNULL_END diff --git a/ios/RCTWebRTC/Utils/AudioDeviceModule/AudioDeviceModule.swift b/ios/RCTWebRTC/Utils/AudioDeviceModule/AudioDeviceModule.swift index 5212e41c9..f46c2c911 100644 --- a/ios/RCTWebRTC/Utils/AudioDeviceModule/AudioDeviceModule.swift +++ b/ios/RCTWebRTC/Utils/AudioDeviceModule/AudioDeviceModule.swift @@ -179,6 +179,10 @@ import WebRTC /// Strong reference to the current engine so we can introspect it if needed. @objc public var engine: AVAudioEngine? + /// Secondary observer that receives forwarded delegate callbacks. + /// This allows the AudioDeviceModuleObserver to receive events and forward them to JS. + private let delegateObserver: RTCAudioDeviceModuleDelegate + /// Textual diagnostics for logging and debugging. 
@objc public override var description: String { "{ " + @@ -195,12 +199,17 @@ import WebRTC } /// Creates a module that mirrors the provided WebRTC audio device module. - /// - Parameter source: The audio device module implementation to observe. + /// - Parameters: + /// - source: The audio device module implementation to observe. + /// - delegateObserver: The observer that receives forwarded delegate callbacks. + /// - audioLevelsNodeAdapter: Adapter for audio level monitoring. init( _ source: any RTCAudioDeviceModuleControlling, + delegateObserver: RTCAudioDeviceModuleDelegate, audioLevelsNodeAdapter: AudioEngineNodeAdapting = AudioEngineLevelNodeAdapter() ) { self.source = source + self.delegateObserver = delegateObserver self.isPlayingSubject = .init(source.isPlaying) self.isRecordingSubject = .init(source.isRecording) self.isMicrophoneMutedSubject = .init(source.isMicrophoneMuted) @@ -219,15 +228,18 @@ import WebRTC .eraseToAnyPublisher() super.init() + _ = source.setMuteMode(.inputMixer) audioLevelsAdapter.subject = audioLevelSubject source.observer = self } /// Objective-C compatible convenience initializer. - /// - Parameter source: The RTCAudioDeviceModule to wrap. + /// - Parameters: + /// - source: The RTCAudioDeviceModule to wrap. + /// - delegateObserver: The observer that receives forwarded delegate callbacks. @objc public - convenience init(source: RTCAudioDeviceModule) { - self.init(source as any RTCAudioDeviceModuleControlling, audioLevelsNodeAdapter: AudioEngineLevelNodeAdapter()) + convenience init(source: RTCAudioDeviceModule, delegateObserver: RTCAudioDeviceModuleDelegate) { + self.init(source as any RTCAudioDeviceModuleControlling, delegateObserver: delegateObserver, audioLevelsNodeAdapter: AudioEngineLevelNodeAdapter()) } // MARK: - Recording @@ -235,6 +247,7 @@ import WebRTC /// Reinitializes the ADM, clearing its internal audio graph state. 
@objc public func reset() { _ = source.reset() + _ = source.setMuteMode(.inputMixer) } /// Switches between stereo and mono playout while keeping the recording @@ -247,7 +260,7 @@ import WebRTC /// means that for outputs where VP is disabled (e.g. stereo) we cannot mute/unmute. /// - `.restartEngine`: rebuilds the whole graph and requires explicit calling of /// `initAndStartRecording` . - _ = source.setMuteMode(isPreferred ? .inputMixer : .voiceProcessing) + // _ = source.setMuteMode(isPreferred ? .inputMixer : .voiceProcessing) /// - Important: We can probably set this one to false when the user doesn't have /// sendAudio capability. _ = source.setRecordingAlwaysPreparedMode(false) @@ -338,12 +351,17 @@ import WebRTC ) { switch speechActivityEvent { case .started: + NSLog("[Callingx | AudioDeviceModule] speechActivityStarted") subject.send(.speechActivityStarted) case .ended: + NSLog("[Callingx | AudioDeviceModule] speechActivityEnded") subject.send(.speechActivityEnded) @unknown default: break } + + // Forward to observer + delegateObserver.audioDeviceModule(audioDeviceModule, didReceiveSpeechActivityEvent: speechActivityEvent) } /// Stores the created engine reference and emits an event so observers can @@ -354,6 +372,10 @@ import WebRTC ) -> Int { self.engine = engine subject.send(.didCreateAudioEngine(engine)) + + // Forward to observer + delegateObserver.audioDeviceModule(audioDeviceModule, didCreateEngine: engine) + return Constant.successResult } @@ -374,6 +396,15 @@ import WebRTC ) isPlayingSubject.send(isPlayoutEnabled) isRecordingSubject.send(isRecordingEnabled) + + // Forward to observer + delegateObserver.audioDeviceModule( + audioDeviceModule, + willEnableEngine: engine, + isPlayoutEnabled: isPlayoutEnabled, + isRecordingEnabled: isRecordingEnabled + ) + return Constant.successResult } @@ -395,6 +426,14 @@ import WebRTC isPlayingSubject.send(isPlayoutEnabled) isRecordingSubject.send(isRecordingEnabled) + // Forward to observer + 
delegateObserver.audioDeviceModule( + audioDeviceModule, + willStartEngine: engine, + isPlayoutEnabled: isPlayoutEnabled, + isRecordingEnabled: isRecordingEnabled + ) + return Constant.successResult } @@ -415,6 +454,15 @@ import WebRTC ) isPlayingSubject.send(isPlayoutEnabled) isRecordingSubject.send(isRecordingEnabled) + + // Forward to observer + delegateObserver.audioDeviceModule( + audioDeviceModule, + didStopEngine: engine, + isPlayoutEnabled: isPlayoutEnabled, + isRecordingEnabled: isRecordingEnabled + ) + return Constant.successResult } @@ -435,6 +483,15 @@ import WebRTC ) isPlayingSubject.send(isPlayoutEnabled) isRecordingSubject.send(isRecordingEnabled) + + // Forward to observer + delegateObserver.audioDeviceModule( + audioDeviceModule, + didDisableEngine: engine, + isPlayoutEnabled: isPlayoutEnabled, + isRecordingEnabled: isRecordingEnabled + ) + return Constant.successResult } @@ -446,6 +503,10 @@ import WebRTC self.engine = nil subject.send(.willReleaseAudioEngine(engine)) audioLevelsAdapter.uninstall(on: 0) + + // Forward to observer + delegateObserver.audioDeviceModule(audioDeviceModule, willReleaseEngine: engine) + return Constant.successResult } @@ -473,6 +534,17 @@ import WebRTC bus: 0, bufferSize: 1024 ) + + // Forward to observer + delegateObserver.audioDeviceModule( + audioDeviceModule, + engine: engine, + configureInputFromSource: source, + toDestination: destination, + format: format, + context: context + ) + return Constant.successResult } @@ -493,6 +565,17 @@ import WebRTC format: format ) ) + + // Forward to observer + delegateObserver.audioDeviceModule( + audioDeviceModule, + engine: engine, + configureOutputFromSource: source, + toDestination: destination, + format: format, + context: context + ) + return Constant.successResult } @@ -500,7 +583,8 @@ import WebRTC public func audioDeviceModuleDidUpdateDevices( _ audioDeviceModule: RTCAudioDeviceModule ) { - // No-op + // Forward to observer + 
delegateObserver.audioDeviceModuleDidUpdateDevices(audioDeviceModule) } /// Mirrors state changes coming from CallKit/WebRTC voice-processing @@ -521,6 +605,9 @@ import WebRTC isVoiceProcessingBypassedSubject.send(state.voiceProcessingBypassed) isVoiceProcessingAGCEnabledSubject.send(state.voiceProcessingAGCEnabled) isStereoPlayoutEnabledSubject.send(state.stereoPlayoutEnabled) + + // Forward to observer + delegateObserver.audioDeviceModule(module, didUpdateAudioProcessingState: state) } /// Mirrors the subset of properties that can be encoded for debugging. diff --git a/ios/RCTWebRTC/WebRTCModule+RTCMediaStream.m b/ios/RCTWebRTC/WebRTCModule+RTCMediaStream.m index d828f4260..682fc3473 100644 --- a/ios/RCTWebRTC/WebRTCModule+RTCMediaStream.m +++ b/ios/RCTWebRTC/WebRTCModule+RTCMediaStream.m @@ -164,7 +164,6 @@ - (NSArray *)createMediaStream:(NSArray *)tracks { return @[ mediaStreamId, trackInfos ]; #endif } - /** * Initializes a new {@link RTCVideoTrack} which satisfies the given constraints. */ @@ -495,12 +494,13 @@ - (void)removeLocalVideoTrackDimensionDetection:(RTCVideoTrack *)videoTrack { RTCMediaStreamTrack *track = self.localTracks[trackID]; if (track) { - // Clean up dimension detection for local video tracks if ([track.kind isEqualToString:@"video"]) { + // Clean up dimension detection for local video tracks [self removeLocalVideoTrackDimensionDetection:(RTCVideoTrack *)track]; } - - track.isEnabled = NO; + if (track.isEnabled) { + track.isEnabled = NO; + } [track.captureController stopCapture]; [self.localTracks removeObjectForKey:trackID]; } @@ -559,6 +559,7 @@ - (void)removeLocalVideoTrackDimensionDetection:(RTCVideoTrack *)videoTrack { } track.isEnabled = enabled; + #if !TARGET_OS_TV if (track.captureController) { // It could be a remote track! 
if (enabled) { diff --git a/ios/RCTWebRTC/WebRTCModule.h b/ios/RCTWebRTC/WebRTCModule.h index 5f20e3fb7..538240911 100644 --- a/ios/RCTWebRTC/WebRTCModule.h +++ b/ios/RCTWebRTC/WebRTCModule.h @@ -22,6 +22,16 @@ static NSString *const kEventVideoTrackDimensionChanged = @"videoTrackDimensionC static NSString *const kEventMediaStreamTrackEnded = @"mediaStreamTrackEnded"; static NSString *const kEventPeerConnectionOnRemoveTrack = @"peerConnectionOnRemoveTrack"; static NSString *const kEventPeerConnectionOnTrack = @"peerConnectionOnTrack"; +static NSString *const kEventFrameCryptionStateChanged = @"frameCryptionStateChanged"; +static NSString *const kEventAudioDeviceModuleSpeechActivity = @"audioDeviceModuleSpeechActivity"; +static NSString *const kEventAudioDeviceModuleEngineCreated = @"audioDeviceModuleEngineCreated"; +static NSString *const kEventAudioDeviceModuleEngineWillEnable = @"audioDeviceModuleEngineWillEnable"; +static NSString *const kEventAudioDeviceModuleEngineWillStart = @"audioDeviceModuleEngineWillStart"; +static NSString *const kEventAudioDeviceModuleEngineDidStop = @"audioDeviceModuleEngineDidStop"; +static NSString *const kEventAudioDeviceModuleEngineDidDisable = @"audioDeviceModuleEngineDidDisable"; +static NSString *const kEventAudioDeviceModuleEngineWillRelease = @"audioDeviceModuleEngineWillRelease"; +static NSString *const kEventAudioDeviceModuleDevicesUpdated = @"audioDeviceModuleDevicesUpdated"; +static NSString *const kEventAudioDeviceModuleAudioProcessingStateUpdated = @"audioDeviceModuleAudioProcessingStateUpdated"; @class AudioDeviceModule; @@ -38,6 +48,10 @@ static NSString *const kEventPeerConnectionOnTrack = @"peerConnectionOnTrack"; @property(nonatomic, strong) NSMutableDictionary *localStreams; @property(nonatomic, strong) NSMutableDictionary *localTracks; +@property(nonatomic, strong) NSMutableDictionary *frameCryptors; +@property(nonatomic, strong) NSMutableDictionary *keyProviders; +@property(nonatomic, strong) NSMutableDictionary 
*dataPacketCryptors; + - (RTCMediaStream *)streamForReactTag:(NSString *)reactTag; @end diff --git a/ios/RCTWebRTC/WebRTCModule.m b/ios/RCTWebRTC/WebRTCModule.m index 3d6f74d38..da9a335a2 100644 --- a/ios/RCTWebRTC/WebRTCModule.m +++ b/ios/RCTWebRTC/WebRTCModule.m @@ -7,6 +7,7 @@ #import #import +#import "AudioDeviceModuleObserver.h" #import "WebRTCModule+RTCPeerConnection.h" #import "WebRTCModule.h" #import "WebRTCModuleOptions.h" @@ -22,6 +23,9 @@ #endif @interface WebRTCModule () + +@property(nonatomic, strong) AudioDeviceModuleObserver *rtcAudioDeviceModuleObserver; + @end @implementation WebRTCModule @@ -106,13 +110,19 @@ - (instancetype)init { decoderFactory:decoderFactory audioProcessingModule:nil]; } - - _audioDeviceModule = [[AudioDeviceModule alloc] initWithSource:_peerConnectionFactory.audioDeviceModule]; + + _rtcAudioDeviceModuleObserver = [[AudioDeviceModuleObserver alloc] initWithWebRTCModule:self]; + _audioDeviceModule = [[AudioDeviceModule alloc] initWithSource:_peerConnectionFactory.audioDeviceModule + delegateObserver:_rtcAudioDeviceModuleObserver]; _peerConnections = [NSMutableDictionary new]; _localStreams = [NSMutableDictionary new]; _localTracks = [NSMutableDictionary new]; + _frameCryptors = [NSMutableDictionary new]; + _keyProviders = [NSMutableDictionary new]; + _dataPacketCryptors = [NSMutableDictionary new]; + dispatch_queue_attr_t attributes = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INITIATED, -1); _workerQueue = dispatch_queue_create("WebRTCModule.queue", attributes); @@ -157,7 +167,17 @@ - (dispatch_queue_t)methodQueue { kEventVideoTrackDimensionChanged, kEventMediaStreamTrackEnded, kEventPeerConnectionOnRemoveTrack, - kEventPeerConnectionOnTrack + kEventPeerConnectionOnTrack, + kEventFrameCryptionStateChanged, + kEventAudioDeviceModuleSpeechActivity, + kEventAudioDeviceModuleEngineCreated, + kEventAudioDeviceModuleEngineWillEnable, + kEventAudioDeviceModuleEngineWillStart, + 
kEventAudioDeviceModuleEngineDidStop,
+        kEventAudioDeviceModuleEngineDidDisable,
+        kEventAudioDeviceModuleEngineWillRelease,
+        kEventAudioDeviceModuleDevicesUpdated,
+        kEventAudioDeviceModuleAudioProcessingStateUpdated
     ];
 }
diff --git a/package.json b/package.json
index 5998fe623..dd433fa52 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@stream-io/react-native-webrtc",
-  "version": "137.1.1",
+  "version": "137.1.2-alpha.1",
   "repository": {
     "type": "git",
     "url": "git+https://github.com/GetStream/react-native-webrtc.git"
diff --git a/src/AudioDeviceModuleEvents.ts b/src/AudioDeviceModuleEvents.ts
new file mode 100644
index 000000000..190a62cde
--- /dev/null
+++ b/src/AudioDeviceModuleEvents.ts
@@ -0,0 +1,147 @@
+import { NativeEventEmitter, NativeModules, Platform } from 'react-native';
+
+const { WebRTCModule } = NativeModules;
+
+export type SpeechActivityEvent = 'started' | 'ended';
+
+export interface SpeechActivityEventData {
+    event: SpeechActivityEvent;
+}
+
+export interface EngineStateEventData {
+    isPlayoutEnabled: boolean;
+    isRecordingEnabled: boolean;
+}
+
+export interface AudioProcessingStateEventData {
+    voiceProcessingEnabled: boolean;
+    voiceProcessingBypassed: boolean;
+    voiceProcessingAGCEnabled: boolean;
+    stereoPlayoutEnabled: boolean;
+}
+
+export type AudioDeviceModuleEventData =
+    | SpeechActivityEventData
+    | EngineStateEventData
+    | AudioProcessingStateEventData
+    | Record<string, never>; // Empty object for events with no data
+
+/**
+ * Event emitter for RTCAudioDeviceModule delegate callbacks.
+ * iOS/macOS only.
+ */ +class AudioDeviceModuleEventEmitter { + private eventEmitter: NativeEventEmitter | null = null; + + public setupListeners() { + // Only setup once (idempotent) + if (this.eventEmitter !== null) { + return; + } + + if (Platform.OS !== 'android' && WebRTCModule) { + this.eventEmitter = new NativeEventEmitter(WebRTCModule); + } + } + + /** + * Subscribe to speech activity events (started/ended) + */ + addSpeechActivityListener(listener: (data: SpeechActivityEventData) => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleSpeechActivity', listener); + } + + /** + * Subscribe to devices updated event (input/output devices changed) + */ + addDevicesUpdatedListener(listener: () => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleDevicesUpdated', listener); + } + + /** + * Subscribe to audio processing state updated event + */ + addAudioProcessingStateUpdatedListener(listener: (data: AudioProcessingStateEventData) => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleAudioProcessingStateUpdated', listener); + } + + /** + * Subscribe to engine created event + */ + addEngineCreatedListener(listener: () => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleEngineCreated', listener); + } + + /** + * Subscribe to engine will enable event + */ + addEngineWillEnableListener(listener: (data: EngineStateEventData) => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return 
this.eventEmitter.addListener('audioDeviceModuleEngineWillEnable', listener); + } + + /** + * Subscribe to engine will start event + */ + addEngineWillStartListener(listener: (data: EngineStateEventData) => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleEngineWillStart', listener); + } + + /** + * Subscribe to engine did stop event + */ + addEngineDidStopListener(listener: (data: EngineStateEventData) => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleEngineDidStop', listener); + } + + /** + * Subscribe to engine did disable event + */ + addEngineDidDisableListener(listener: (data: EngineStateEventData) => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleEngineDidDisable', listener); + } + + /** + * Subscribe to engine will release event + */ + addEngineWillReleaseListener(listener: () => void) { + if (!this.eventEmitter) { + throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + } + + return this.eventEmitter.addListener('audioDeviceModuleEngineWillRelease', listener); + } +} + +export const audioDeviceModuleEvents = new AudioDeviceModuleEventEmitter(); diff --git a/src/index.ts b/src/index.ts index bda35462d..496c83b96 100644 --- a/src/index.ts +++ b/src/index.ts @@ -8,6 +8,7 @@ if (WebRTCModule === null) { }`); } +import { audioDeviceModuleEvents } from './AudioDeviceModuleEvents'; import { setupNativeEvents } from './EventEmitter'; import Logger from './Logger'; import mediaDevices from './MediaDevices'; @@ -31,6 +32,9 @@ Logger.enable(`${Logger.ROOT_PREFIX}:*`); // Add listeners for the native events early, since they are added asynchronously. 
setupNativeEvents(); +// Ensure audioDeviceModuleEvents is initialized and event listeners are registered +audioDeviceModuleEvents.setupListeners(); + export { RTCIceCandidate, RTCPeerConnection, @@ -47,7 +51,8 @@ export { type MediaTrackSettings, mediaDevices, permissions, - registerGlobals + registerGlobals, + audioDeviceModuleEvents, }; declare const global: any;