Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion packages/noise-cancellation-react-native/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@
},
"homepage": "https://github.com/GetStream/stream-video-js#readme",
"devDependencies": {
"@stream-io/react-native-webrtc": "137.1.0",
"@stream-io/react-native-webrtc": "137.1.3",
"react": "19.1.0",
"react-native": "^0.81.5",
"react-native-builder-bob": "^0.37.0",
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
package com.streamvideo.reactnative

import android.app.Activity
import android.content.BroadcastReceiver
import android.content.Context
import android.content.Intent
Expand All @@ -9,6 +10,7 @@ import android.graphics.Bitmap
import android.media.AudioAttributes
import android.media.AudioFormat
import android.media.AudioTrack
import android.media.projection.MediaProjectionManager
import android.net.Uri
import android.os.BatteryManager
import android.os.Build
Expand All @@ -23,6 +25,8 @@ import com.facebook.react.bridge.ReactMethod
import com.facebook.react.bridge.WritableMap
import com.facebook.react.modules.core.DeviceEventManagerModule.RCTDeviceEventEmitter
import com.oney.WebRTCModule.WebRTCModule
import com.oney.WebRTCModule.WebRTCModuleOptions
import com.streamvideo.reactnative.screenshare.ScreenAudioCapture
import com.streamvideo.reactnative.util.CallAlivePermissionsHelper
import com.streamvideo.reactnative.util.CallAliveServiceChecker
import com.streamvideo.reactnative.util.PiPHelper
Expand Down Expand Up @@ -52,6 +56,9 @@ class StreamVideoReactNativeModule(reactContext: ReactApplicationContext) :
private var busyToneAudioTrack: AudioTrack? = null
private var busyToneJob: Job? = null

// Screen share audio mixing
private var screenAudioCapture: ScreenAudioCapture? = null

private var thermalStatusListener: PowerManager.OnThermalStatusChangedListener? = null

private var batteryChargingStateReceiver = object : BroadcastReceiver() {
Expand Down Expand Up @@ -148,6 +155,7 @@ class StreamVideoReactNativeModule(reactContext: ReactApplicationContext) :
reactApplicationContext.unregisterReceiver(batteryChargingStateReceiver)
stopThermalStatusUpdates()
stopBusyToneInternal() // Clean up busy tone on invalidate
stopScreenShareAudioMixingInternal() // Clean up screen share audio on invalidate
super.invalidate()
}

Expand Down Expand Up @@ -484,6 +492,83 @@ class StreamVideoReactNativeModule(reactContext: ReactApplicationContext) :
return ShortArray(totalSamples)
}

@ReactMethod
fun startScreenShareAudioMixing(promise: Promise) {
    try {
        // AudioPlaybackCaptureConfiguration (used by ScreenAudioCapture)
        // exists only on Android 10+.
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.Q) {
            promise.reject("API_LEVEL", "Screen audio capture requires Android 10 (API 29)+")
            return
        }

        // Idempotent: a second start while active is not an error.
        if (screenAudioCapture != null) {
            Log.w(NAME, "Screen share audio mixing is already active")
            promise.resolve(null)
            return
        }

        // Reject with a descriptive error instead of `!!`, which would throw an
        // NPE and surface to JS as an opaque generic "ERROR" rejection.
        val module = reactApplicationContext.getNativeModule(WebRTCModule::class.java)
        if (module == null) {
            promise.reject("NO_WEBRTC", "WebRTCModule is not available")
            return
        }

        // Reuse the MediaProjection permission result Intent captured by the
        // WebRTC module when screen sharing started; we cannot prompt for a
        // new permission from here.
        val permissionIntent = module.userMediaImpl?.mediaProjectionPermissionResultData
        if (permissionIntent == null) {
            promise.reject("NO_PROJECTION", "No MediaProjection permission available. Start screen sharing first.")
            return
        }

        // Create a MediaProjection dedicated to audio capture.
        val mediaProjectionManager = reactApplicationContext.getSystemService(
            Context.MEDIA_PROJECTION_SERVICE
        ) as MediaProjectionManager
        val mediaProjection = mediaProjectionManager.getMediaProjection(
            Activity.RESULT_OK, permissionIntent
        )
        if (mediaProjection == null) {
            promise.reject("PROJECTION_ERROR", "Failed to create MediaProjection for audio capture")
            return
        }

        screenAudioCapture = ScreenAudioCapture(mediaProjection).also { it.start() }

        // Register the screen audio bytes provider so the AudioBufferCallback
        // in WebRTCModule mixes screen audio into the mic buffer.
        WebRTCModuleOptions.getInstance().screenAudioBytesProvider =
            WebRTCModuleOptions.ScreenAudioBytesProvider { bytesRequested ->
                screenAudioCapture?.getScreenAudioBytes(bytesRequested)
            }

        Log.d(NAME, "Screen share audio mixing started")
        promise.resolve(null)
    } catch (e: Exception) {
        // Pass the throwable itself so the full stack trace reaches logcat.
        Log.e(NAME, "Error starting screen share audio mixing", e)
        promise.reject("ERROR", e.message, e)
    }
}

@ReactMethod
fun stopScreenShareAudioMixing(promise: Promise) {
    // Tears down screen-share audio mixing; safe to call even if mixing was
    // never started (the internal helper is a no-op in that case).
    try {
        stopScreenShareAudioMixingInternal()
        promise.resolve(null)
    } catch (e: Exception) {
        // Pass the throwable so the stack trace is preserved in logcat.
        Log.e(NAME, "Error stopping screen share audio mixing", e)
        promise.reject("ERROR", e.message, e)
    }
}

// Shared teardown used by the exported stop method and invalidate().
// Never throws: errors are logged so invalidate() can continue cleanup.
private fun stopScreenShareAudioMixingInternal() {
    try {
        // Clear the provider first so the AudioBufferCallback stops mixing
        // before the capture is released.
        WebRTCModuleOptions.getInstance().screenAudioBytesProvider = null

        screenAudioCapture?.stop()
        screenAudioCapture = null

        Log.d(NAME, "Screen share audio mixing stopped")
    } catch (e: Exception) {
        // Pass the throwable so the stack trace is preserved in logcat.
        Log.e(NAME, "Error in stopScreenShareAudioMixingInternal", e)
    }
}

companion object {
private const val NAME = "StreamVideoReactNative"
private const val SAMPLE_RATE = 22050
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
package com.streamvideo.reactnative.screenshare

import android.annotation.SuppressLint
import android.media.AudioAttributes
import android.media.AudioFormat
import android.media.AudioPlaybackCaptureConfiguration
import android.media.AudioRecord
import android.media.projection.MediaProjection
import android.os.Build
import android.util.Log
import androidx.annotation.RequiresApi
import java.nio.ByteBuffer

/**
* Captures system media audio using [AudioPlaybackCaptureConfiguration].
*
* Uses the given [MediaProjection] to set up an [AudioRecord] that captures
* audio from media playback, games, and other apps (USAGE_MEDIA, USAGE_GAME,
* USAGE_UNKNOWN) but not notifications, alarms, or system sounds.
*
* Audio is captured in a pull-based manner via [getScreenAudioBytes], which
* reads exactly the requested number of bytes using [AudioRecord.READ_BLOCKING].
* This is designed to be called from the WebRTC audio processing thread.
*
* Format: 48kHz, mono, PCM 16-bit (matching WebRTC's audio pipeline).
*
* Requires Android 10 (API 29+).
*/
@RequiresApi(Build.VERSION_CODES.Q)
class ScreenAudioCapture(private val mediaProjection: MediaProjection) {

    private var audioRecord: AudioRecord? = null

    // Reused across getScreenAudioBytes() calls so the audio thread does not
    // allocate a new direct buffer on every callback.
    private var screenAudioBuffer: ByteBuffer? = null

    /**
     * Builds and starts the [AudioRecord] with a playback-capture configuration
     * matching media/game/unknown usages. No-op (with a warning) if already
     * started, and releases the record if it fails to initialize.
     */
    @SuppressLint("MissingPermission")
    fun start() {
        // Guard against double-start: a second call would otherwise build a
        // new AudioRecord and leak the first one without releasing it.
        if (audioRecord != null) {
            Log.w(TAG, "Screen audio capture already started")
            return
        }

        val playbackConfig = AudioPlaybackCaptureConfiguration.Builder(mediaProjection)
            .addMatchingUsage(AudioAttributes.USAGE_MEDIA)
            .addMatchingUsage(AudioAttributes.USAGE_GAME)
            .addMatchingUsage(AudioAttributes.USAGE_UNKNOWN)
            .build()

        val audioFormat = AudioFormat.Builder()
            .setSampleRate(SAMPLE_RATE)
            .setChannelMask(CHANNEL_CONFIG)
            .setEncoding(AUDIO_FORMAT)
            .build()

        val record = AudioRecord.Builder()
            .setAudioFormat(audioFormat)
            .setAudioPlaybackCaptureConfig(playbackConfig)
            .build()

        if (record.state != AudioRecord.STATE_INITIALIZED) {
            Log.e(TAG, "AudioRecord failed to initialize")
            record.release()
            return
        }

        record.startRecording()
        // Publish only after the record is initialized and recording, so
        // getScreenAudioBytes() never observes a half-constructed instance.
        audioRecord = record
        Log.d(TAG, "Screen audio capture started")
    }

    /**
     * Pull-based read: returns a [ByteBuffer] containing up to [bytesRequested]
     * bytes of captured screen audio (limit is set to the bytes actually read).
     *
     * Called from the WebRTC audio processing thread. Uses
     * [AudioRecord.READ_BLOCKING] so it blocks until data is available.
     *
     * @return A [ByteBuffer] with screen audio data, or `null` if capture is
     *   not active, [bytesRequested] is non-positive, or the read failed.
     */
    fun getScreenAudioBytes(bytesRequested: Int): ByteBuffer? {
        val record = audioRecord ?: return null
        if (bytesRequested <= 0) return null

        // Reuse the cached buffer when large enough; otherwise grow it.
        val buffer = screenAudioBuffer?.takeIf { it.capacity() >= bytesRequested }
            ?: ByteBuffer.allocateDirect(bytesRequested).also { screenAudioBuffer = it }

        buffer.clear()
        buffer.limit(bytesRequested)

        val bytesRead = record.read(buffer, bytesRequested, AudioRecord.READ_BLOCKING)
        if (bytesRead > 0) {
            // Window the buffer to exactly the bytes that were read.
            buffer.position(0)
            buffer.limit(bytesRead)
            return buffer
        }
        return null
    }

    /**
     * Stops and releases the [AudioRecord] and drops the cached buffer.
     * Safe to call multiple times or before [start].
     */
    fun stop() {
        try {
            audioRecord?.stop()
        } catch (e: Exception) {
            // stop() can throw if the record is in a bad state; release anyway.
            Log.w(TAG, "Error stopping AudioRecord: ${e.message}")
        }
        audioRecord?.release()
        audioRecord = null
        screenAudioBuffer = null
        Log.d(TAG, "Screen audio capture stopped")
    }

    companion object {
        private const val TAG = "ScreenAudioCapture"
        // 48kHz mono PCM 16-bit, matching WebRTC's audio pipeline.
        const val SAMPLE_RATE = 48000
        private const val CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_MONO
        private const val AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT
    }
}
97 changes: 93 additions & 4 deletions packages/react-native-sdk/ios/StreamVideoReactNative.m
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,17 @@
#import "StreamVideoReactNative.h"
#import "WebRTCModule.h"
#import "WebRTCModuleOptions.h"
#import "InAppScreenCapturer.h"
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

// Import Swift-generated header for ScreenShareAudioMixer
#if __has_include(<stream_react_native_webrtc/stream_react_native_webrtc-Swift.h>)
#import <stream_react_native_webrtc/stream_react_native_webrtc-Swift.h>
#elif __has_include("stream_react_native_webrtc-Swift.h")
#import "stream_react_native_webrtc-Swift.h"
#endif

// Do not change these constants; they must match the values used by react-native-webrtc
NSNotificationName const kBroadcastStartedNotification = @"iOS_BroadcastStarted";
NSNotificationName const kBroadcastStoppedNotification = @"iOS_BroadcastStopped";
Expand Down Expand Up @@ -626,22 +634,22 @@ - (void)removeAudioInterruptionHandling {

- (void)audioSessionInterrupted:(NSNotification *)notification {
AVAudioSessionInterruptionType interruptionType = [notification.userInfo[AVAudioSessionInterruptionTypeKey] unsignedIntegerValue];

switch (interruptionType) {
case AVAudioSessionInterruptionTypeBegan:
if (_busyTonePlayer && _busyTonePlayer.isPlaying) {
[_busyTonePlayer pause];
}
break;

case AVAudioSessionInterruptionTypeEnded: {
AVAudioSessionInterruptionOptions options = [notification.userInfo[AVAudioSessionInterruptionOptionKey] unsignedIntegerValue];

if (options & AVAudioSessionInterruptionOptionShouldResume) {
// Reactivate audio session
NSError *error = nil;
[[AVAudioSession sharedInstance] setActive:YES error:&error];

if (!error && _busyTonePlayer) {
[_busyTonePlayer play];
} else if (error) {
Expand All @@ -653,4 +661,85 @@ - (void)audioSessionInterrupted:(NSNotification *)notification {
}
}

#pragma mark - In-App Screen Capture

RCT_EXPORT_METHOD(startInAppScreenCapture:(BOOL)includeAudio
                  resolve:(RCTPromiseResolveBlock)resolve
                  reject:(RCTPromiseRejectBlock)reject)
{
    // Flip the shared WebRTC options so subsequent screen-share capture uses
    // the in-app capturer, optionally including app audio.
    WebRTCModuleOptions *sharedOptions = [WebRTCModuleOptions sharedInstance];
    sharedOptions.useInAppScreenCapture = YES;
    sharedOptions.includeScreenShareAudio = includeAudio;
    resolve(nil);
}

RCT_EXPORT_METHOD(stopInAppScreenCapture:(RCTPromiseResolveBlock)resolve
                  reject:(RCTPromiseRejectBlock)reject)
{
    // Reset both in-app capture flags on the shared WebRTC options.
    WebRTCModuleOptions *sharedOptions = [WebRTCModuleOptions sharedInstance];
    sharedOptions.useInAppScreenCapture = NO;
    sharedOptions.includeScreenShareAudio = NO;
    resolve(nil);
}

#pragma mark - Screen Share Audio Mixing

// Starts mixing captured screen audio into the outgoing WebRTC capture path.
// Wiring order is: (1) install the mixer as the APM's capture post-processing
// delegate, (2) start the mixer, (3) connect the capturer's audio buffers to
// the mixer's queue.
// NOTE(review): if `mixer` or `capturer` is nil, this silently no-ops and
// still resolves successfully — confirm that is the intended contract.
RCT_EXPORT_METHOD(startScreenShareAudioMixing:(RCTPromiseResolveBlock)resolve
                  reject:(RCTPromiseRejectBlock)reject)
{
    WebRTCModule *webrtcModule = [self.bridge moduleForClass:[WebRTCModule class]];
    WebRTCModuleOptions *options = [WebRTCModuleOptions sharedInstance];

    ScreenShareAudioMixer *mixer = webrtcModule.audioDeviceModule.screenShareAudioMixer;

    // Wire mixer as capturePostProcessingDelegate on the audio processing module.
    // The delegate is only installed when the configured APM is the default
    // implementation; otherwise mixing cannot work and we log a warning.
    id<RTCAudioProcessingModule> apmId = options.audioProcessingModule;
    if (apmId && [apmId isKindOfClass:[RTCDefaultAudioProcessingModule class]]) {
        RTCDefaultAudioProcessingModule *apm = (RTCDefaultAudioProcessingModule *)apmId;
        apm.capturePostProcessingDelegate = mixer;
        NSLog(@"[SSAMixer] Set capturePostProcessingDelegate on APM");
    } else {
        NSLog(@"[SSAMixer] WARNING: No RTCDefaultAudioProcessingModule available, mixing will not work");
    }

    [mixer startMixing];

    // Wire audio buffer handler on the active capturer → mixer.enqueue
    // The block retains `mixer` for as long as the handler is installed.
    InAppScreenCapturer *capturer = options.activeInAppScreenCapturer;
    if (capturer) {
        capturer.audioBufferHandler = ^(CMSampleBufferRef sampleBuffer) {
            [mixer enqueue:sampleBuffer];
        };
    }

    resolve(nil);
}

// Stops screen-share audio mixing, tearing down in reverse order of setup:
// disconnect the capturer's buffer handler, stop the mixer, then clear the
// APM's capture post-processing delegate.
// NOTE(review): resolves successfully even when nothing was running (nil mixer
// or capturer) — confirm that is the intended contract.
RCT_EXPORT_METHOD(stopScreenShareAudioMixing:(RCTPromiseResolveBlock)resolve
                  reject:(RCTPromiseRejectBlock)reject)
{
    WebRTCModule *webrtcModule = [self.bridge moduleForClass:[WebRTCModule class]];
    WebRTCModuleOptions *options = [WebRTCModuleOptions sharedInstance];

    // Stop feeding audio to the mixer
    InAppScreenCapturer *capturer = options.activeInAppScreenCapturer;
    if (capturer) {
        capturer.audioBufferHandler = nil;
    }

    // Stop mixing
    ScreenShareAudioMixer *mixer = webrtcModule.audioDeviceModule.screenShareAudioMixer;
    [mixer stopMixing];

    // Clear capturePostProcessingDelegate
    id<RTCAudioProcessingModule> apmId = options.audioProcessingModule;
    if (apmId && [apmId isKindOfClass:[RTCDefaultAudioProcessingModule class]]) {
        RTCDefaultAudioProcessingModule *apm = (RTCDefaultAudioProcessingModule *)apmId;
        apm.capturePostProcessingDelegate = nil;
        NSLog(@"[SSAMixer] Cleared capturePostProcessingDelegate on APM");
    }

    resolve(nil);
}

@end
4 changes: 2 additions & 2 deletions packages/react-native-sdk/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@
"@react-native-firebase/app": ">=17.5.0",
"@react-native-firebase/messaging": ">=17.5.0",
"@stream-io/noise-cancellation-react-native": ">=0.1.0",
"@stream-io/react-native-webrtc": ">=137.1.0",
"@stream-io/react-native-webrtc": ">=137.1.3",
"@stream-io/video-filters-react-native": ">=0.1.0",
"expo": ">=47.0.0",
"expo-build-properties": "*",
Expand Down Expand Up @@ -130,7 +130,7 @@
"@react-native-firebase/messaging": "^23.4.0",
"@react-native/babel-preset": "^0.81.5",
"@stream-io/noise-cancellation-react-native": "workspace:^",
"@stream-io/react-native-webrtc": "137.1.0",
"@stream-io/react-native-webrtc": "137.1.3",
"@stream-io/video-filters-react-native": "workspace:^",
"@testing-library/jest-native": "^5.4.3",
"@testing-library/react-native": "13.3.3",
Expand Down
Loading
Loading