diff --git a/.changes/pre-connect-audio-web b/.changes/pre-connect-audio-web
new file mode 100644
index 000000000..d21408104
--- /dev/null
+++ b/.changes/pre-connect-audio-web
@@ -0,0 +1 @@
+patch type="added" "Add web support for pre-connect audio buffer"
diff --git a/lib/src/preconnect/audio_frame_capture.dart b/lib/src/preconnect/audio_frame_capture.dart
new file mode 100644
index 000000000..ba3f983a3
--- /dev/null
+++ b/lib/src/preconnect/audio_frame_capture.dart
@@ -0,0 +1,60 @@
+// Copyright 2025 LiveKit, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import 'dart:typed_data' show Uint8List;
+
+import 'package:flutter_webrtc/flutter_webrtc.dart' show MediaStreamTrack;
+
+import 'audio_frame_capture_native.dart' if (dart.library.js_interop) 'audio_frame_capture_web.dart';
+
+/// A single frame of raw PCM audio data.
+class AudioFrame {
+  final int sampleRate;
+  final int channels;
+  final Uint8List data;
+  final String commonFormat;
+
+  const AudioFrame({
+    required this.sampleRate,
+    required this.channels,
+    required this.data,
+    required this.commonFormat,
+  });
+}
+
+/// Platform-agnostic interface for capturing raw PCM audio frames from a track.
+///
+/// On native (iOS/Android), this uses MethodChannel + EventChannel.
+/// On web, this uses Web Audio API with AudioWorklet.
+abstract class AudioFrameCapture {
+  /// Stream of raw PCM audio frames.
+  Stream<AudioFrame> get frameStream;
+
+  /// Start capturing audio from the given [track].
+  ///
+  /// Returns `true` if the renderer started successfully.
+  Future<bool> start({
+    required MediaStreamTrack track,
+    required String rendererId,
+    required int sampleRate,
+    required int channels,
+    required String commonFormat,
+  });
+
+  /// Stop capturing and release resources.
+  Future<void> stop();
+}
+
+/// Factory that returns the platform-appropriate implementation.
+AudioFrameCapture createAudioFrameCapture() => createAudioFrameCaptureImpl();
diff --git a/lib/src/preconnect/audio_frame_capture_native.dart b/lib/src/preconnect/audio_frame_capture_native.dart
new file mode 100644
index 000000000..bea7587b2
--- /dev/null
+++ b/lib/src/preconnect/audio_frame_capture_native.dart
@@ -0,0 +1,93 @@
+// Copyright 2025 LiveKit, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import 'dart:async';
+import 'dart:typed_data' show Uint8List;
+
+import 'package:flutter/services.dart' show EventChannel;
+
+import 'package:flutter_webrtc/flutter_webrtc.dart' show MediaStreamTrack;
+
+import '../logger.dart';
+import '../support/native.dart';
+import 'audio_frame_capture.dart';
+
+/// Native (iOS/Android/macOS) implementation using MethodChannel + EventChannel.
+class AudioFrameCaptureNative implements AudioFrameCapture {
+  EventChannel? _eventChannel;
+  StreamSubscription? _streamSubscription;
+  final _controller = StreamController<AudioFrame>.broadcast();
+  String? _rendererId;
+
+  @override
+  Stream<AudioFrame> get frameStream => _controller.stream;
+
+  @override
+  Future<bool> start({
+    required MediaStreamTrack track,
+    required String rendererId,
+    required int sampleRate,
+    required int channels,
+    required String commonFormat,
+  }) async {
+    final result = await Native.startAudioRenderer(
+      trackId: track.id!,
+      rendererId: rendererId,
+      format: {
+        'commonFormat': commonFormat,
+        'sampleRate': sampleRate,
+        'channels': channels,
+      },
+    );
+
+    if (result != true) return false;
+
+    _rendererId = rendererId;
+
+    _eventChannel = EventChannel('io.livekit.audio.renderer/channel-$rendererId');
+    _streamSubscription = _eventChannel?.receiveBroadcastStream().listen((event) {
+      try {
+        _controller.add(AudioFrame(
+          sampleRate: event['sampleRate'] as int,
+          channels: event['channels'] as int,
+          data: event['data'] as Uint8List,
+          commonFormat: (event['commonFormat'] as String?) ?? commonFormat,
+        ));
+      } catch (e) {
+        logger.warning('[AudioFrameCapture] Error parsing native event: $e');
+      }
+    });
+
+    return true;
+  }
+
+  @override
+  Future<void> stop() async {
+    await _streamSubscription?.cancel();
+    _streamSubscription = null;
+    _eventChannel = null;
+
+    final rendererId = _rendererId;
+    if (rendererId != null) {
+      await Native.stopAudioRenderer(rendererId: rendererId);
+      _rendererId = null;
+    }
+
+    if (!_controller.isClosed) {
+      await _controller.close();
+    }
+  }
+}
+
+AudioFrameCapture createAudioFrameCaptureImpl() => AudioFrameCaptureNative();
diff --git a/lib/src/preconnect/audio_frame_capture_web.dart b/lib/src/preconnect/audio_frame_capture_web.dart
new file mode 100644
index 000000000..e956acd78
--- /dev/null
+++ b/lib/src/preconnect/audio_frame_capture_web.dart
@@ -0,0 +1,186 @@
+// Copyright 2025 LiveKit, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import 'dart:async';
+import 'dart:js_interop';
+import 'dart:js_interop_unsafe';
+import 'dart:typed_data' show Uint8List;
+
+import 'package:dart_webrtc/dart_webrtc.dart' show MediaStreamTrackWeb;
+import 'package:flutter_webrtc/flutter_webrtc.dart' show MediaStreamTrack;
+import 'package:web/web.dart' as web;
+
+import '../logger.dart';
+import '../support/audio_pcm_utils.dart';
+import 'audio_frame_capture.dart';
+
+/// JavaScript source for the AudioWorkletProcessor.
+///
+/// Runs on the audio rendering thread. Forwards raw float32 input samples
+/// to the main thread via MessagePort.
+const _workletProcessorJs = '''
+class AudioRendererProcessor extends AudioWorkletProcessor {
+  process(inputs, outputs, parameters) {
+    const input = inputs[0];
+    if (input && input.length > 0 && input[0].length > 0) {
+      const channels = input.length;
+      const frames = input[0].length;
+
+      // Interleave channels into a single Float32Array.
+      const interleaved = new Float32Array(frames * channels);
+      for (let frame = 0; frame < frames; frame++) {
+        for (let ch = 0; ch < channels; ch++) {
+          interleaved[frame * channels + ch] = input[ch][frame];
+        }
+      }
+
+      this.port.postMessage({
+        samples: interleaved.buffer,
+        channels: channels,
+        frames: frames,
+      }, [interleaved.buffer]);
+    }
+    return true;
+  }
+}
+registerProcessor('audio-renderer-processor', AudioRendererProcessor);
+''';
+
+/// Web implementation using AudioWorklet to capture raw PCM frames.
+class AudioFrameCaptureWeb implements AudioFrameCapture {
+  web.AudioContext? _audioContext;
+  web.AudioWorkletNode? _workletNode;
+  web.AudioNode? _sourceNode;
+  StreamController<AudioFrame>? _controller;
+  String _targetFormat = 'int16';
+  int _targetChannels = 1;
+
+  @override
+  Stream<AudioFrame> get frameStream => (_controller ??= StreamController<AudioFrame>.broadcast()).stream;
+
+  @override
+  Future<bool> start({
+    required MediaStreamTrack track,
+    required String rendererId,
+    required int sampleRate,
+    required int channels,
+    required String commonFormat,
+  }) async {
+    _targetFormat = commonFormat;
+    _targetChannels = channels;
+    _controller ??= StreamController<AudioFrame>.broadcast();
+
+    try {
+      // 1. Get the underlying JS MediaStreamTrack.
+      final jsTrack = (track as MediaStreamTrackWeb).jsTrack;
+      final mediaStream = web.MediaStream([jsTrack].toJS);
+
+      // 2. Create AudioContext.
+      _audioContext = web.AudioContext();
+      final ctx = _audioContext!;
+
+      // 3. Register worklet processor via Blob URL.
+      final blob = web.Blob(
+        [_workletProcessorJs.toJS].toJS,
+        web.BlobPropertyBag(type: 'application/javascript'),
+      );
+      final blobUrl = web.URL.createObjectURL(blob);
+      try {
+        await ctx.audioWorklet.addModule(blobUrl).toDart;
+      } finally {
+        web.URL.revokeObjectURL(blobUrl);
+      }
+
+      // 4. Create audio pipeline: source → worklet → destination.
+      _sourceNode = ctx.createMediaStreamSource(mediaStream);
+      _workletNode = web.AudioWorkletNode(ctx, 'audio-renderer-processor');
+
+      _sourceNode!.connect(_workletNode!);
+      // No destination connection needed — process() is called as long as
+      // there is input flowing and it returns true. Connecting to
+      // ctx.destination would route mic audio to speakers (echo/feedback).
+
+      // 5. Listen for PCM frames from the worklet thread.
+      _workletNode!.port.onmessage = _onWorkletMessage.toJS;
+
+      return true;
+    } catch (e) {
+      logger.warning('[AudioFrameCapture] Failed to start web capture: $e');
+      await stop();
+      return false;
+    }
+  }
+
+  void _onWorkletMessage(web.MessageEvent event) {
+    final controller = _controller;
+    if (controller == null || controller.isClosed) return;
+
+    try {
+      final data = event.data as JSObject;
+      final samplesBuffer = (data.getProperty('samples'.toJS) as JSArrayBuffer).toDart;
+      final channels = (data.getProperty('channels'.toJS) as JSNumber).toDartInt;
+      final frames = (data.getProperty('frames'.toJS) as JSNumber).toDartInt;
+
+      final outChannels = _targetChannels.clamp(1, channels);
+      final actualSampleRate = _audioContext?.sampleRate.toInt() ?? 48000;
+      final srcFloat32 = samplesBuffer.asFloat32List();
+
+      final Uint8List bytes;
+      if (_targetFormat == 'float32') {
+        bytes = float32ToFloat32Bytes(srcFloat32, channels, outChannels, frames);
+      } else {
+        bytes = float32ToInt16Bytes(srcFloat32, channels, outChannels, frames);
+      }
+
+      controller.add(AudioFrame(
+        sampleRate: actualSampleRate,
+        channels: outChannels,
+        data: bytes,
+        commonFormat: _targetFormat,
+      ));
+    } catch (e) {
+      logger.warning('[AudioFrameCapture] Error processing worklet frame: $e');
+    }
+  }
+
+  @override
+  Future<void> stop() async {
+    _workletNode?.port.onmessage = null;
+
+    try {
+      _workletNode?.disconnect();
+    } catch (_) {}
+    _workletNode = null;
+
+    try {
+      _sourceNode?.disconnect();
+    } catch (_) {}
+    _sourceNode = null;
+
+    try {
+      if (_audioContext?.state != 'closed') {
+        await _audioContext?.close().toDart;
+      }
+    } catch (_) {}
+    _audioContext = null;
+
+    final controller = _controller;
+    _controller = null;
+    if (controller != null && !controller.isClosed) {
+      await controller.close();
+    }
+  }
+}
+
+AudioFrameCapture createAudioFrameCaptureImpl() => AudioFrameCaptureWeb();
diff --git a/lib/src/preconnect/pre_connect_audio_buffer.dart b/lib/src/preconnect/pre_connect_audio_buffer.dart
index
8f3ad764a..0dedf25ea 100644 --- a/lib/src/preconnect/pre_connect_audio_buffer.dart +++ b/lib/src/preconnect/pre_connect_audio_buffer.dart @@ -13,9 +13,8 @@ // limitations under the License. import 'dart:async'; -import 'dart:typed_data' show Uint8List; -import 'package:flutter/services.dart' show EventChannel; +import 'package:flutter/foundation.dart' show kIsWeb; import 'package:flutter_webrtc/flutter_webrtc.dart' as webrtc; import 'package:uuid/uuid.dart'; @@ -25,12 +24,12 @@ import '../events.dart'; import '../logger.dart'; import '../participant/local.dart'; import '../support/byte_ring_buffer.dart'; -import '../support/native.dart'; import '../support/reusable_completer.dart'; import '../track/local/audio.dart'; import '../types/data_stream.dart'; import '../types/other.dart'; import '../types/participant_state.dart'; +import 'audio_frame_capture.dart'; typedef PreConnectOnError = void Function(Object error); @@ -57,10 +56,9 @@ class PreConnectAudioBuffer { // Internal states bool _isRecording = false; bool _isBufferSent = false; - String? _rendererId; LocalAudioTrack? _localTrack; - EventChannel? _eventChannel; + AudioFrameCapture? _audioCapture; StreamSubscription? _streamSubscription; PreConnectOnError? 
_onError; @@ -128,18 +126,17 @@ class PreConnectAudioBuffer { final rendererId = Uuid().v4(); logger.info('Starting audio renderer with rendererId: $rendererId'); - final result = await Native.startAudioRenderer( - trackId: _localTrack!.mediaStreamTrack.id!, + _audioCapture = createAudioFrameCapture(); + final result = await _audioCapture!.start( + track: _localTrack!.mediaStreamTrack, rendererId: rendererId, - format: { - 'commonFormat': 'int16', - 'sampleRate': _requestSampleRate, - 'channels': 1, - }, + sampleRate: _requestSampleRate, + channels: 1, + commonFormat: 'int16', ); - if (result != true) { - final error = StateError('Failed to start native audio renderer ($result)'); + if (!result) { + final error = StateError('Failed to start audio renderer ($result)'); logger.severe('[Preconnect audio] $error'); _onError?.call(error); await stopRecording(withError: error); @@ -148,35 +145,25 @@ class PreConnectAudioBuffer { throw error; } - await webrtc.NativeAudioManagement.startLocalRecording(); - _nativeRecordingStarted = true; - - _rendererId = rendererId; + if (!kIsWeb) { + await webrtc.NativeAudioManagement.startLocalRecording(); + _nativeRecordingStarted = true; + } logger.info('startAudioRenderer result: $result'); - _eventChannel = EventChannel('io.livekit.audio.renderer/channel-$rendererId'); - _streamSubscription = _eventChannel?.receiveBroadcastStream().listen((event) async { - if (!_isRecording) { - return; - } + _streamSubscription = _audioCapture!.frameStream.listen((frame) { + if (!_isRecording) return; - try { - // Audio format can differ from what was requested. - _renderedSampleRate = event['sampleRate'] as int; - _renderedChannels = event['channels'] as int; - // Native sends raw interleaved PCM bytes. 
- final Uint8List bytes = event['data'] as Uint8List; - - final didOverflow = _buffer.write(bytes); - if (didOverflow && !_hasLoggedOverflow) { - _hasLoggedOverflow = true; - logger.warning( - '[Preconnect audio] buffer exceeded ${defaultMaxSize ~/ 1024}KB, dropping oldest audio until agent is ready', - ); - } - } catch (e) { - logger.warning('[Preconnect audio] Error parsing event: $e'); + _renderedSampleRate = frame.sampleRate; + _renderedChannels = frame.channels; + + final didOverflow = _buffer.write(frame.data); + if (didOverflow && !_hasLoggedOverflow) { + _hasLoggedOverflow = true; + logger.warning( + '[Preconnect audio] buffer exceeded ${defaultMaxSize ~/ 1024}KB, dropping oldest audio until agent is ready', + ); } }); @@ -206,7 +193,7 @@ class PreConnectAudioBuffer { )); } - /// Stops recording and releases native audio capture resources. + /// Stops recording and releases audio capture resources. /// /// If [withError] is provided, [agentReadyFuture] completes with that error. /// Otherwise, [agentReadyFuture] completes successfully (if not already @@ -219,20 +206,12 @@ class PreConnectAudioBuffer { await _streamSubscription?.cancel(); _streamSubscription = null; - // Dispose the event channel. - _eventChannel = null; - - final rendererId = _rendererId; - if (rendererId != null) { - await Native.stopAudioRenderer( - rendererId: rendererId, - ); - } - - _rendererId = null; + // Stop the audio capture. + await _audioCapture?.stop(); + _audioCapture = null; - // Stop native audio when errored - if (withError != null && _nativeRecordingStarted) { + // Stop native recording session if it was started. 
+ if (_nativeRecordingStarted) { await webrtc.NativeAudioManagement.stopLocalRecording(); } @@ -327,6 +306,7 @@ class PreConnectAudioBuffer { attributes: { 'sampleRate': sampleRate.toString(), 'channels': channels.toString(), + 'commonFormat': 'int16', 'trackId': localTrackSid, }, totalSize: data.length, diff --git a/lib/src/support/audio_pcm_utils.dart b/lib/src/support/audio_pcm_utils.dart new file mode 100644 index 000000000..61be81d43 --- /dev/null +++ b/lib/src/support/audio_pcm_utils.dart @@ -0,0 +1,67 @@ +// Copyright 2025 LiveKit, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import 'dart:typed_data' show ByteData, Endian, Float32List, Uint8List; + +/// Convert interleaved float32 samples to int16 PCM bytes (little-endian). +/// +/// [srcFloat32] contains interleaved samples with [srcChannels] channels. +/// The output contains only the first [outChannels] channels, with each sample +/// clamped to [-1.0, 1.0] and scaled to the int16 range. 
+Uint8List float32ToInt16Bytes(
+  Float32List srcFloat32,
+  int srcChannels,
+  int outChannels,
+  int frames,
+) {
+  final out = ByteData(frames * outChannels * 2);
+
+  for (var frame = 0; frame < frames; frame++) {
+    for (var ch = 0; ch < outChannels; ch++) {
+      final sample = srcFloat32[frame * srcChannels + ch];
+      final clamped = sample.clamp(-1.0, 1.0);
+      final int16 = (clamped * 32767).round();
+      out.setInt16((frame * outChannels + ch) * 2, int16, Endian.little);
+    }
+  }
+
+  return out.buffer.asUint8List();
+}
+
+/// Extract interleaved float32 samples as raw bytes (little-endian).
+///
+/// If [srcChannels] equals [outChannels], the source buffer is returned
+/// directly (zero-copy). Otherwise, only the first [outChannels] are kept.
+Uint8List float32ToFloat32Bytes(
+  Float32List srcFloat32,
+  int srcChannels,
+  int outChannels,
+  int frames,
+) {
+  if (srcChannels == outChannels) {
+    return srcFloat32.buffer.asUint8List();
+  }
+
+  final out = ByteData(frames * outChannels * 4);
+  for (var frame = 0; frame < frames; frame++) {
+    for (var ch = 0; ch < outChannels; ch++) {
+      out.setFloat32(
+        (frame * outChannels + ch) * 4,
+        srcFloat32[frame * srcChannels + ch],
+        Endian.little,
+      );
+    }
+  }
+  return out.buffer.asUint8List();
+}
diff --git a/test/preconnect/audio_frame_capture_test.dart b/test/preconnect/audio_frame_capture_test.dart
new file mode 100644
index 000000000..d4a536825
--- /dev/null
+++ b/test/preconnect/audio_frame_capture_test.dart
@@ -0,0 +1,429 @@
+import 'dart:async';
+import 'dart:typed_data';
+
+import 'package:flutter_test/flutter_test.dart';
+
+import 'package:livekit_client/src/preconnect/audio_frame_capture.dart';
+import 'package:livekit_client/src/support/audio_pcm_utils.dart';
+import 'package:livekit_client/src/support/byte_ring_buffer.dart';
+
+/// A mock AudioFrameCapture that emits frames from a StreamController.
+class MockAudioFrameCapture implements AudioFrameCapture { + final _controller = StreamController.broadcast(); + bool started = false; + bool stopped = false; + + @override + Stream get frameStream => _controller.stream; + + @override + Future start({ + required dynamic track, + required String rendererId, + required int sampleRate, + required int channels, + required String commonFormat, + }) async { + started = true; + return true; + } + + @override + Future stop() async { + stopped = true; + if (!_controller.isClosed) { + await _controller.close(); + } + } + + /// Emit a frame for testing. + void emitFrame(AudioFrame frame) => _controller.add(frame); +} + +/// Helper: build int16 PCM bytes from a list of sample values. +Uint8List int16Bytes(List samples) { + final data = ByteData(samples.length * 2); + for (var i = 0; i < samples.length; i++) { + data.setInt16(i * 2, samples[i], Endian.little); + } + return data.buffer.asUint8List(); +} + +/// Helper: build float32 bytes from a list of sample values. +Uint8List float32Bytes(List samples) { + final data = ByteData(samples.length * 4); + for (var i = 0; i < samples.length; i++) { + data.setFloat32(i * 4, samples[i], Endian.little); + } + return data.buffer.asUint8List(); +} + +/// Helper: read int16 sample at index from bytes (little-endian). +int readInt16(Uint8List bytes, int index) { + return ByteData.sublistView(bytes).getInt16(index * 2, Endian.little); +} + +/// Helper: read float32 sample at index from bytes (little-endian). 
+double readFloat32(Uint8List bytes, int index) { + return ByteData.sublistView(bytes).getFloat32(index * 4, Endian.little); +} + +void main() { + group('AudioFrame', () { + test('stores sample rate, channels, data, and format', () { + final frame = AudioFrame( + sampleRate: 48000, + channels: 1, + data: int16Bytes([100, -200, 300]), + commonFormat: 'int16', + ); + + expect(frame.sampleRate, 48000); + expect(frame.channels, 1); + expect(frame.commonFormat, 'int16'); + expect(frame.data.length, 6); // 3 samples * 2 bytes + }); + }); + + group('MockAudioFrameCapture', () { + test('start returns true and sets started flag', () async { + final capture = MockAudioFrameCapture(); + final result = await capture.start( + track: null, + rendererId: 'test-id', + sampleRate: 24000, + channels: 1, + commonFormat: 'int16', + ); + + expect(result, true); + expect(capture.started, true); + }); + + test('emitted frames arrive on frameStream', () async { + final capture = MockAudioFrameCapture(); + await capture.start( + track: null, + rendererId: 'test-id', + sampleRate: 24000, + channels: 1, + commonFormat: 'int16', + ); + + final frames = []; + final sub = capture.frameStream.listen(frames.add); + + capture.emitFrame(AudioFrame( + sampleRate: 24000, + channels: 1, + data: int16Bytes([1000, -1000]), + commonFormat: 'int16', + )); + + capture.emitFrame(AudioFrame( + sampleRate: 24000, + channels: 1, + data: int16Bytes([2000, -2000]), + commonFormat: 'int16', + )); + + // Let microtasks run. 
+ await Future.delayed(Duration.zero); + + expect(frames.length, 2); + expect(frames[0].data.length, 4); + expect(frames[1].data.length, 4); + + await sub.cancel(); + await capture.stop(); + }); + + test('stop sets stopped flag', () async { + final capture = MockAudioFrameCapture(); + await capture.start( + track: null, + rendererId: 'test-id', + sampleRate: 24000, + channels: 1, + commonFormat: 'int16', + ); + await capture.stop(); + + expect(capture.stopped, true); + }); + }); + + group('float32ToInt16Bytes', () { + test('converts silence to zeros', () { + final src = Float32List.fromList([0.0, 0.0, 0.0, 0.0]); + final bytes = float32ToInt16Bytes(src, 1, 1, 4); + + expect(bytes.length, 8); // 4 frames * 1 ch * 2 bytes + for (var i = 0; i < 4; i++) { + expect(readInt16(bytes, i), 0); + } + }); + + test('converts full-scale float to int16 range', () { + final src = Float32List.fromList([1.0, -1.0]); + final bytes = float32ToInt16Bytes(src, 1, 1, 2); + + expect(readInt16(bytes, 0), 32767); + expect(readInt16(bytes, 1), -32767); + }); + + test('clamps values outside [-1.0, 1.0]', () { + final src = Float32List.fromList([2.5, -3.0]); + final bytes = float32ToInt16Bytes(src, 1, 1, 2); + + expect(readInt16(bytes, 0), 32767); + expect(readInt16(bytes, 1), -32767); + }); + + test('converts mid-range values correctly', () { + final src = Float32List.fromList([0.5, -0.5]); + final bytes = float32ToInt16Bytes(src, 1, 1, 2); + + // 0.5 * 32767 = 16383.5 → rounds to 16384 + expect(readInt16(bytes, 0), 16384); + // -0.5 * 32767 = -16383.5 → rounds to -16384 + expect(readInt16(bytes, 1), -16384); + }); + + test('stereo to mono downmix keeps first channel', () { + // Interleaved stereo: [L0, R0, L1, R1] + final src = Float32List.fromList([0.5, -0.5, 0.25, -0.25]); + final bytes = float32ToInt16Bytes(src, 2, 1, 2); + + expect(bytes.length, 4); // 2 frames * 1 ch * 2 bytes + // Should contain L0 and L1 only. 
+ expect(readInt16(bytes, 0), (0.5 * 32767).round()); + expect(readInt16(bytes, 1), (0.25 * 32767).round()); + }); + + test('stereo pass-through preserves both channels', () { + final src = Float32List.fromList([0.5, -0.5, 0.25, -0.25]); + final bytes = float32ToInt16Bytes(src, 2, 2, 2); + + expect(bytes.length, 8); // 2 frames * 2 ch * 2 bytes + expect(readInt16(bytes, 0), (0.5 * 32767).round()); + expect(readInt16(bytes, 1), (-0.5 * 32767).round()); + expect(readInt16(bytes, 2), (0.25 * 32767).round()); + expect(readInt16(bytes, 3), (-0.25 * 32767).round()); + }); + }); + + group('float32ToFloat32Bytes', () { + test('same channel count returns buffer directly (zero-copy)', () { + final src = Float32List.fromList([0.1, 0.2, 0.3, 0.4]); + final bytes = float32ToFloat32Bytes(src, 1, 1, 4); + + expect(bytes.length, 16); // 4 frames * 1 ch * 4 bytes + for (var i = 0; i < 4; i++) { + expect(readFloat32(bytes, i), closeTo(src[i], 1e-6)); + } + }); + + test('stereo to mono keeps first channel', () { + final src = Float32List.fromList([0.1, 0.9, 0.2, 0.8]); + final bytes = float32ToFloat32Bytes(src, 2, 1, 2); + + expect(bytes.length, 8); // 2 frames * 1 ch * 4 bytes + expect(readFloat32(bytes, 0), closeTo(0.1, 1e-6)); + expect(readFloat32(bytes, 1), closeTo(0.2, 1e-6)); + }); + + test('preserves negative values', () { + final src = Float32List.fromList([-1.0, 0.0, 1.0]); + final bytes = float32ToFloat32Bytes(src, 1, 1, 3); + + expect(readFloat32(bytes, 0), closeTo(-1.0, 1e-6)); + expect(readFloat32(bytes, 1), closeTo(0.0, 1e-6)); + expect(readFloat32(bytes, 2), closeTo(1.0, 1e-6)); + }); + }); + + group('Buffer flow end-to-end', () { + test('frames flow into ByteRingBuffer correctly', () async { + final capture = MockAudioFrameCapture(); + final buffer = ByteRingBuffer(4096); + + await capture.start( + track: null, + rendererId: 'test-id', + sampleRate: 24000, + channels: 1, + commonFormat: 'int16', + ); + + int? capturedSampleRate; + int? 
capturedChannels; + + final sub = capture.frameStream.listen((frame) { + capturedSampleRate = frame.sampleRate; + capturedChannels = frame.channels; + buffer.write(frame.data); + }); + + // Simulate 3 frames of 480 samples each (10ms at 48kHz mono int16). + for (var i = 0; i < 3; i++) { + final samples = List.generate(480, (j) => (j * 10) - 2400); + capture.emitFrame(AudioFrame( + sampleRate: 48000, + channels: 1, + data: int16Bytes(samples), + commonFormat: 'int16', + )); + } + + await Future.delayed(Duration.zero); + + expect(capturedSampleRate, 48000); + expect(capturedChannels, 1); + // 3 frames * 480 samples * 2 bytes = 2880 + expect(buffer.length, 2880); + + final bytes = buffer.takeBytes(); + expect(bytes.length, 2880); + // Buffer should be empty after takeBytes. + expect(buffer.length, 0); + + await sub.cancel(); + await capture.stop(); + }); + + test('buffer overflow drops oldest data', () async { + final capture = MockAudioFrameCapture(); + // Small buffer: 100 bytes. + final buffer = ByteRingBuffer(100); + + await capture.start( + track: null, + rendererId: 'test-id', + sampleRate: 24000, + channels: 1, + commonFormat: 'int16', + ); + + bool overflowed = false; + final sub = capture.frameStream.listen((frame) { + if (buffer.write(frame.data)) { + overflowed = true; + } + }); + + // Write 60 bytes, then 60 more → should overflow at 100. 
+ capture.emitFrame(AudioFrame( + sampleRate: 24000, + channels: 1, + data: Uint8List(60), + commonFormat: 'int16', + )); + capture.emitFrame(AudioFrame( + sampleRate: 24000, + channels: 1, + data: Uint8List(60), + commonFormat: 'int16', + )); + + await Future.delayed(Duration.zero); + + expect(overflowed, true); + expect(buffer.length, 100); + + await sub.cancel(); + await capture.stop(); + }); + + test('float32 frames through conversion then buffer', () async { + final capture = MockAudioFrameCapture(); + final buffer = ByteRingBuffer(4096); + + await capture.start( + track: null, + rendererId: 'test-id', + sampleRate: 48000, + channels: 1, + commonFormat: 'int16', + ); + + // Simulate what the web implementation does: receive float32 from + // worklet, convert to int16 bytes, then buffer. + final sub = capture.frameStream.listen((frame) { + final srcFloat32 = frame.data.buffer.asFloat32List(); + final int16Bytes = float32ToInt16Bytes(srcFloat32, 1, 1, srcFloat32.length); + buffer.write(int16Bytes); + }); + + // Emit raw float32 data (as if from the worklet). + final samples = Float32List.fromList(List.generate(128, (i) => i / 128.0)); + capture.emitFrame(AudioFrame( + sampleRate: 48000, + channels: 1, + data: samples.buffer.asUint8List(), + commonFormat: 'float32', + )); + + await Future.delayed(Duration.zero); + + // 128 samples * 2 bytes (int16) = 256 + expect(buffer.length, 256); + + // Verify first and last converted values. 
+ final result = buffer.takeBytes(); + final view = ByteData.sublistView(result); + + // Sample 0: 0.0 → int16 = 0 + expect(view.getInt16(0, Endian.little), 0); + // Sample 127: 127/128 ≈ 0.9921875 → int16 ≈ 32511 + final last = view.getInt16(127 * 2, Endian.little); + expect(last, closeTo((127 / 128.0 * 32767).round(), 1)); + + await sub.cancel(); + await capture.stop(); + }); + + test('stereo to mono conversion in buffer pipeline', () async { + final capture = MockAudioFrameCapture(); + final buffer = ByteRingBuffer(4096); + + await capture.start( + track: null, + rendererId: 'test-id', + sampleRate: 48000, + channels: 1, + commonFormat: 'int16', + ); + + final sub = capture.frameStream.listen((frame) { + final srcFloat32 = frame.data.buffer.asFloat32List(); + // 2 src channels → 1 out channel (mono downmix) + final int16Data = float32ToInt16Bytes(srcFloat32, 2, 1, srcFloat32.length ~/ 2); + buffer.write(int16Data); + }); + + // Interleaved stereo: [L0=0.5, R0=-0.5, L1=0.25, R1=-0.25] + final stereo = Float32List.fromList([0.5, -0.5, 0.25, -0.25]); + capture.emitFrame(AudioFrame( + sampleRate: 48000, + channels: 2, + data: stereo.buffer.asUint8List(), + commonFormat: 'float32', + )); + + await Future.delayed(Duration.zero); + + // 2 frames * 1 channel * 2 bytes = 4 + expect(buffer.length, 4); + + final result = buffer.takeBytes(); + final view = ByteData.sublistView(result); + // Only left channel: 0.5 → 16384, 0.25 → 8192 + expect(view.getInt16(0, Endian.little), (0.5 * 32767).round()); + expect(view.getInt16(2, Endian.little), (0.25 * 32767).round()); + + await sub.cancel(); + await capture.stop(); + }); + }); +}