Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions packages/client/src/Call.ts
Original file line number Diff line number Diff line change
Expand Up @@ -372,6 +372,9 @@ export class Call {
this.registerEffects();
this.registerReconnectHandlers();

// Set up the device managers again. Although this is already done
// in the DeviceManager's constructor, they'll need to be re-set up
// in the cases where a call instance is recycled (join -> leave -> join).
this.camera.setup();
this.microphone.setup();
this.screenShare.setup();
Expand Down
22 changes: 20 additions & 2 deletions packages/client/src/coordinator/connection/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -108,17 +108,35 @@ export type ConnectionRecoveredEvent = {
type: 'connection.recovered';
};

/**
 * Client-generated event dispatched when the no-audio detector reports a
 * change in whether the enabled microphone is actually capturing audio.
 */
export type MicCaptureReportEvent = {
  type: 'mic.capture_report';
  /**
   * The CID of the call this report belongs to.
   */
  call_cid: string;
  /**
   * Whether the mic is capturing audio.
   */
  capturesAudio: boolean;
  /**
   * The device ID of the mic.
   */
  deviceId?: string;
  /**
   * The human-readable label of the mic.
   */
  label?: string;
};

/**
 * Union of all events flowing through the client: coordinator `VideoEvent`s
 * plus locally-generated connection/transport/device events. Every member
 * may optionally carry a `received_at` timestamp added on receipt.
 */
export type StreamVideoEvent = (
  | VideoEvent
  | NetworkChangedEvent
  | ConnectionChangedEvent
  | TransportChangedEvent
  | ConnectionRecoveredEvent
  | MicCaptureReportEvent
) & { received_at?: string | Date };

// TODO: we should use WSCallEvent here but that needs fixing
export type StreamCallEvent = Extract<StreamVideoEvent, { call_cid: string }>;
export type EventTypes = 'all' | VideoEvent['type'];
export type EventTypes = 'all' | StreamVideoEvent['type'];

export type AllClientEventTypes = 'all' | StreamVideoEvent['type'];
export type AllClientEvents = {
Expand All @@ -129,7 +147,7 @@ export type ClientEventListener<E extends keyof AllClientEvents> = (
) => void;

export type AllClientCallEvents = {
[K in EventTypes]: Extract<VideoEvent, { type: K }>;
[K in EventTypes]: Extract<StreamVideoEvent, { type: K }>;
};

export type AllCallEvents = AllClientCallEvents & AllSfuEvents;
Expand Down
4 changes: 2 additions & 2 deletions packages/client/src/devices/DeviceManager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ export abstract class DeviceManager<
protected readonly call: Call;
protected readonly trackType: TrackType;
protected subscriptions: Function[] = [];
private areSubscriptionsSetUp = false;
protected areSubscriptionsSetUp = false;
private isTrackStoppedDueToTrackEnd = false;
private filters: MediaStreamFilterEntry[] = [];
private statusChangeConcurrencyTag = Symbol('statusChangeConcurrencyTag');
Expand Down Expand Up @@ -558,7 +558,7 @@ export abstract class DeviceManager<
);
}

private findDevice(devices: MediaDeviceInfo[], deviceId: string) {
/**
 * Finds the device with the given `deviceId` among the provided devices,
 * restricted to this manager's media device kind.
 *
 * @param devices the list of devices to search through.
 * @param deviceId the ID of the device to look for.
 * @returns the matching device, or `undefined` when no device matches
 * both the ID and the kind.
 */
protected findDevice(devices: MediaDeviceInfo[], deviceId: string) {
  const kind = this.mediaDeviceKind;
  return devices.find((d) => d.deviceId === deviceId && d.kind === kind);
}
Expand Down
97 changes: 94 additions & 3 deletions packages/client/src/devices/MicrophoneManager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,35 +10,44 @@ import { TrackDisableMode } from './DeviceManagerState';
import { getAudioDevices, getAudioStream } from './devices';
import { AudioBitrateProfile, TrackType } from '../gen/video/sfu/models/models';
import { createSoundDetector } from '../helpers/sound-detector';
import { createNoAudioDetector } from '../helpers/no-audio-detector';
import { isReactNative } from '../helpers/platforms';
import {
AudioSettingsResponse,
NoiseCancellationSettingsModeEnum,
OwnCapability,
} from '../gen/coordinator';
import { type MicCaptureReportEvent } from '../coordinator/connection/types';
import { CallingState } from '../store';
import {
createSafeAsyncSubscription,
createSubscription,
getCurrentValue,
} from '../store/rxUtils';
import { RNSpeechDetector } from '../helpers/RNSpeechDetector';
import { withoutConcurrency } from '../helpers/concurrency';
import { disposeOfMediaStream } from './utils';
import { promiseWithResolvers } from '../helpers/promise';

export class MicrophoneManager extends AudioDeviceManager<MicrophoneManagerState> {
private speakingWhileMutedNotificationEnabled = true;
private soundDetectorConcurrencyTag = Symbol('soundDetectorConcurrencyTag');
private soundDetectorCleanup?: Function;
private soundDetectorCleanup?: () => Promise<void>;
private noAudioDetectorCleanup?: () => Promise<void>;
private rnSpeechDetector: RNSpeechDetector | undefined;
private noiseCancellation: INoiseCancellation | undefined;
private noiseCancellationChangeUnsubscribe: (() => void) | undefined;
private noiseCancellationRegistration?: Promise<void>;
private unregisterNoiseCancellation?: () => Promise<void>;

private silenceThresholdMs = 5000;

/**
 * Constructs a new MicrophoneManager bound to the given call.
 *
 * @param call the call instance this manager operates on.
 * @param disableMode how the audio track is disabled when the mic is
 * turned off (defaults to 'stop-tracks').
 */
constructor(call: Call, disableMode: TrackDisableMode = 'stop-tracks') {
  super(call, new MicrophoneManagerState(disableMode), TrackType.AUDIO);
}

override setup(): void {
if (this.areSubscriptionsSetUp) return;
super.setup();
this.subscriptions.push(
createSafeAsyncSubscription(
Expand Down Expand Up @@ -110,6 +119,45 @@ export class MicrophoneManager extends AudioDeviceManager<MicrophoneManagerState
}
}),
);

if (!isReactNative()) {
const unsubscribe = createSafeAsyncSubscription(
combineLatest([this.state.status$, this.state.mediaStream$]),
async ([status, mediaStream]) => {
if (this.noAudioDetectorCleanup) {
const cleanup = this.noAudioDetectorCleanup;
this.noAudioDetectorCleanup = undefined;
await cleanup().catch((err) => {
this.logger.warn('Failed to stop no-audio detector', err);
});
}

if (status !== 'enabled' || !mediaStream) return;
if (this.silenceThresholdMs <= 0) return;

const deviceId = this.state.selectedDevice;
const devices = getCurrentValue(this.listDevices());
const label = devices.find((d) => d.deviceId === deviceId)?.label;

this.noAudioDetectorCleanup = createNoAudioDetector(mediaStream, {
noAudioThresholdMs: this.silenceThresholdMs,
emitIntervalMs: this.silenceThresholdMs,
onCaptureStatusChange: (capturesAudio) => {
const event: MicCaptureReportEvent = {
type: 'mic.capture_report',
call_cid: this.call.cid,
capturesAudio,
deviceId,
label,
};
this.call.tracer.trace('mic.capture_report', event);
this.call.streamClient.dispatchEvent(event);
},
});
},
);
this.subscriptions.push(unsubscribe);
}
}

/**
Expand Down Expand Up @@ -224,6 +272,50 @@ export class MicrophoneManager extends AudioDeviceManager<MicrophoneManagerState
await this.stopSpeakingWhileMutedDetection();
}

/**
 * Configures the silence threshold (in milliseconds) used by the
 * no-audio detection. While the microphone is enabled but produces
 * no audio for this long, a 'mic.capture_report' event is emitted.
 *
 * @param thresholdMs the silence threshold in milliseconds
 * (default: 5000). Passing 0 or a negative value disables the
 * no-audio detection entirely.
 */
setSilenceThreshold(thresholdMs: number) {
  this.silenceThresholdMs = thresholdMs;
}
Comment thread
oliverlaz marked this conversation as resolved.

/**
 * Runs an audio capture test against the microphone identified by
 * `deviceId` and reports whether it actually produced audio within
 * the test window.
 *
 * This method is only available in browser environments (it throws
 * in React Native).
 *
 * @param deviceId the ID of the microphone to test.
 * @param options optional test configuration.
 * @param options.testDurationMs how long to listen for audio, in
 * milliseconds (default: 3000).
 * @returns a promise resolving to `true` when audio was captured,
 * `false` otherwise.
 */
async performTest(
  deviceId: string,
  options?: { testDurationMs?: number },
): Promise<boolean> {
  if (isReactNative()) throw new Error('Not available in React Native');

  const mediaStream = await this.getStream({ deviceId: { exact: deviceId } });
  const durationMs = options?.testDurationMs ?? 3000;
  const { promise, resolve } = promiseWithResolvers<boolean>();
  // the detector reports after `durationMs`; the first report decides
  // the test outcome, after which the detector and stream are released
  const stopDetector = createNoAudioDetector(mediaStream, {
    noAudioThresholdMs: durationMs,
    emitIntervalMs: durationMs,
    onCaptureStatusChange: async (capturesAudio) => {
      resolve(capturesAudio);
      await stopDetector().catch((err) => {
        this.logger.warn('Failed to stop detector during test', err);
      });
      disposeOfMediaStream(mediaStream);
    },
  });
  return promise;
}
Comment thread
oliverlaz marked this conversation as resolved.

/**
* Applies the audio settings to the microphone.
* @param settings the audio settings to apply.
Expand Down Expand Up @@ -284,13 +376,12 @@ export class MicrophoneManager extends AudioDeviceManager<MicrophoneManagerState

private async startSpeakingWhileMutedDetection(deviceId?: string) {
await withoutConcurrency(this.soundDetectorConcurrencyTag, async () => {
await this.stopSpeakingWhileMutedDetection();
if (isReactNative()) {
this.rnSpeechDetector = new RNSpeechDetector();
const unsubscribe = await this.rnSpeechDetector.start((event) => {
this.state.setSpeakingWhileMuted(event.isSoundDetected);
});
this.soundDetectorCleanup = () => {
this.soundDetectorCleanup = async () => {
unsubscribe();
this.rnSpeechDetector = undefined;
};
Expand Down
1 change: 1 addition & 0 deletions packages/client/src/devices/ScreenShareManager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ export class ScreenShareManager extends AudioDeviceManager<
}

override setup(): void {
if (this.areSubscriptionsSetUp) return;
super.setup();
this.subscriptions.push(
createSubscription(this.call.state.settings$, (settings) => {
Expand Down
98 changes: 97 additions & 1 deletion packages/client/src/devices/__tests__/MicrophoneManager.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,13 +20,18 @@ import {
mockCall,
mockDeviceIds$,
} from './mocks';
import { setupAudioContextMock } from './web-audio.mocks';
Comment thread
oliverlaz marked this conversation as resolved.
import { getAudioStream } from '../devices';
import { MicrophoneManager } from '../MicrophoneManager';
import { of } from 'rxjs';
import {
createSoundDetector,
SoundStateChangeHandler,
} from '../../helpers/sound-detector';
import {
createNoAudioDetector,
NoAudioDetectorOptions,
} from '../../helpers/no-audio-detector';
import { PermissionsContext } from '../../permissions';
import { Tracer } from '../../stats';

Expand All @@ -52,6 +57,13 @@ vi.mock('../../helpers/sound-detector.ts', () => {
};
});

vi.mock('../../helpers/no-audio-detector.ts', () => {
  console.log('MOCKING no-audio detector');
  // stub the real detector; its cleanup function resolves immediately
  const createNoAudioDetector = vi.fn(() => async () => {});
  return { createNoAudioDetector };
});

vi.mock('../../Call.ts', () => {
console.log('MOCKING Call');
return {
Expand All @@ -64,6 +76,8 @@ describe('MicrophoneManager', () => {
let call: Call;

beforeEach(() => {
setupAudioContextMock();

call = new Call({
id: '',
type: '',
Expand Down Expand Up @@ -154,7 +168,7 @@ describe('MicrophoneManager', () => {

it(`should stop sound detection if mic is enabled`, async () => {
manager.state.setSpeakingWhileMuted(true);
manager['soundDetectorCleanup'] = () => {};
manager['soundDetectorCleanup'] = async () => {};

await manager.enable();

Expand Down Expand Up @@ -411,6 +425,88 @@ describe('MicrophoneManager', () => {
});
});

// Tests for MicrophoneManager.performTest(). The no-audio detector is
// mocked (see the vi.mock above), so each test installs a one-shot
// implementation and drives `onCaptureStatusChange` manually to simulate
// the detector's verdict.
describe('performTest', () => {
  it('should return true when microphone captures audio', async () => {
    const mock = vi.mocked(createNoAudioDetector);

    mock.mockImplementationOnce((_stream, options) => {
      // Simulate audio detected immediately
      setImmediate(() => options.onCaptureStatusChange(true));
      return async () => {};
    });

    const capturesAudio = await manager.performTest('test-device-id');
    expect(capturesAudio).toBe(true);
  });

  it('should return false when microphone does not capture audio', async () => {
    const mock = vi.mocked(createNoAudioDetector);

    mock.mockImplementationOnce((_stream, options) => {
      // Simulate no audio detected after test duration
      setImmediate(() => options.onCaptureStatusChange(false));
      return async () => {};
    });

    const capturesAudio = await manager.performTest('test-device-id');
    expect(capturesAudio).toBe(false);
  });

  it('should use custom testDurationMs when provided', async () => {
    const mock = vi.mocked(createNoAudioDetector);
    // NOTE(review): declared without an initializer and read below without
    // a definite-assignment marker — relies on the mock implementation
    // running before the assertions; confirm the tsconfig allows this.
    let capturedOptions: NoAudioDetectorOptions;

    mock.mockImplementationOnce((_stream, options) => {
      capturedOptions = options;
      setTimeout(() => options.onCaptureStatusChange(true), 50);
      return async () => {};
    });

    const customDuration = 5000;
    await manager.performTest('test-device-id', {
      testDurationMs: customDuration,
    });

    // the custom duration must be forwarded as both the silence threshold
    // and the emit interval of the detector
    expect(capturedOptions.noAudioThresholdMs).toBe(customDuration);
    expect(capturedOptions.emitIntervalMs).toBe(customDuration);
  });

  it('should call getStream with exact deviceId', async () => {
    const mock = vi.mocked(createNoAudioDetector);
    mock.mockImplementationOnce((_stream, options) => {
      setTimeout(() => options.onCaptureStatusChange(true), 50);
      return async () => {};
    });

    const deviceId = 'specific-device-id';
    await manager.performTest(deviceId);

    // performTest must request the stream with an `exact` device constraint
    expect(getAudioStream).toHaveBeenCalledWith(
      { deviceId: { exact: deviceId } },
      expect.any(Tracer),
    );
  });

  it('should cleanup detector and dispose stream after test completes', async () => {
    const mock = vi.mocked(createNoAudioDetector);
    const cleanupFn = vi.fn(async () => {});
    let onCaptureStatusChange: ((capturesAudio: boolean) => void) | undefined;

    mock.mockImplementationOnce((_stream, options) => {
      onCaptureStatusChange = options.onCaptureStatusChange;
      setTimeout(() => onCaptureStatusChange?.(true), 50);
      return cleanupFn;
    });

    await manager.performTest('test-device-id');

    // Wait for cleanup to be called
    await vi.waitFor(() => {
      expect(cleanupFn).toHaveBeenCalled();
    });
  });
});

afterEach(() => {
vi.clearAllMocks();
vi.resetModules();
Expand Down
1 change: 1 addition & 0 deletions packages/client/src/devices/__tests__/mocks.ts
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,7 @@ export const mockAudioStream = () => {
getSettings: () => ({
deviceId: mockAudioDevices[0].deviceId,
}),
label: mockAudioDevices[0].label,
enabled: true,
readyState: 'live',
stop: () => {
Expand Down
Loading
Loading