/**
 * @volley/recognition-client-sdk
 * Recognition Service TypeScript/Node.js Client SDK
 * (package-registry header retained as a comment so the file parses as TypeScript)
 */
/**
* Unit tests for RealTimeTwoWayWebSocketRecognitionClient
*/
import { RealTimeTwoWayWebSocketRecognitionClient } from './recognition-client';
import { ClientState } from './recognition-client.types';
import { RecognitionResultTypeV1 } from '@recog/shared-types';
import { WebSocket as MockWebSocket } from 'ws';
// Mock WebSocket
jest.mock('ws');
describe('RealTimeTwoWayWebSocketRecognitionClient', () => {
let client: RealTimeTwoWayWebSocketRecognitionClient;
let mockWs: any;
beforeEach(() => {
  // Start every test from a clean slate: no recorded calls or stale implementations.
  jest.clearAllMocks();

  // Stub only the WebSocket surface the client touches.
  mockWs = {
    readyState: MockWebSocket.CONNECTING,
    send: jest.fn(),
    close: jest.fn(),
    on: jest.fn(),
    removeAllListeners: jest.fn(),
  };

  // Any `new WebSocket(...)` inside the client hands back our stub.
  (MockWebSocket as any).mockImplementation(() => mockWs);

  // Fresh client under test with a minimal-but-valid ASR configuration.
  client = new RealTimeTwoWayWebSocketRecognitionClient({
    url: 'ws://test.example.com/recognize',
    asrRequestConfig: {
      provider: 'deepgram',
      language: 'en',
      sampleRate: 16000,
      encoding: 'linear16',
    },
  });
});
afterEach(() => {
// Clean up
});
describe('Constructor', () => {
  // Builds a fresh ASR config object per client so no instance shares state.
  const makeAsrConfig = () => ({
    provider: 'deepgram',
    language: 'en',
    sampleRate: 16000,
    encoding: 'linear16',
  });

  it('should initialize with correct default values', () => {
    expect(client.getState()).toBe(ClientState.INITIAL);
    expect(client.isConnected()).toBe(false);
    expect(client.isBufferOverflowing()).toBe(false);
    const utteranceId = client.getAudioUtteranceId();
    expect(utteranceId).toBeDefined();
    expect(typeof utteranceId).toBe('string');
  });

  it('should have immutable audioUtteranceId', () => {
    // Reading the id twice must yield the exact same value.
    const firstRead = client.getAudioUtteranceId();
    const secondRead = client.getAudioUtteranceId();
    expect(secondRead).toBe(firstRead);
  });

  it('should expose the WebSocket URL via getUrl()', () => {
    const wsUrl = client.getUrl();
    expect(wsUrl).toBeDefined();
    expect(typeof wsUrl).toBe('string');
    expect(wsUrl).toContain('audioUtteranceId=');
  });

  it('should build URL from stage parameter', () => {
    const stagingClient = new RealTimeTwoWayWebSocketRecognitionClient({
      stage: 'staging',
      asrRequestConfig: makeAsrConfig(),
    });
    const builtUrl = stagingClient.getUrl();
    expect(builtUrl).toBeDefined();
    // URL should be built from stage (exact URL depends on mocked getRecognitionServiceBase)
    expect(builtUrl).toContain('/ws/v1/recognize');
  });

  it('should prioritize url over stage when both provided', () => {
    const explicitUrlClient = new RealTimeTwoWayWebSocketRecognitionClient({
      url: 'ws://custom.example.com/ws/v1/recognize',
      stage: 'staging',
      asrRequestConfig: makeAsrConfig(),
    });
    const resolvedUrl = explicitUrlClient.getUrl();
    expect(resolvedUrl).toContain('ws://custom.example.com/ws/v1/recognize');
    expect(resolvedUrl).not.toContain('staging');
  });

  it('should initialize stats correctly', () => {
    const stats = client.getStats();
    expect(stats.audioBytesSent).toBe(0);
    expect(stats.audioChunksSent).toBe(0);
    expect(stats.audioChunksBuffered).toBe(0);
    expect(stats.bufferOverflowCount).toBe(0);
    expect(stats.currentBufferedChunks).toBe(0);
  });
});
// NOTE(review): suite is skipped. The tests extract the client's event handlers
// out of mockWs.on.mock.calls, which assumes connect() registers 'open',
// 'message' and 'error' listeners via ws.on() — confirm before un-skipping.
describe.skip('State Management', () => {
it('should transition from INITIAL to CONNECTING on connect()', async () => {
expect(client.getState()).toBe(ClientState.INITIAL);
// Simulate successful connection
const connectPromise = client.connect();
expect(client.getState()).toBe(ClientState.CONNECTING);
// Simulate WebSocket open event
mockWs.readyState = MockWebSocket.OPEN;
// Fire the client's registered 'open' listener so connect() can resolve.
const openHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'open')[1];
openHandler();
await connectPromise;
expect(client.getState()).toBe(ClientState.CONNECTED);
});
it('should transition to READY when server sends ready message', async () => {
// Connect first
const connectPromise = client.connect();
mockWs.readyState = MockWebSocket.OPEN;
const openHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'open')[1];
openHandler();
await connectPromise;
// Simulate the server-side ready control message that moves the client to READY.
const messageHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message')[1];
const readyMessage = JSON.stringify({
v: 1,
type: 'message',
data: {
type: 'ClientControlMessage',
action: 'ready_for_uploading_recording',
audioUtteranceId: 'test-utterance-id'
}
});
messageHandler(readyMessage);
expect(client.getState()).toBe(ClientState.READY);
expect(client.isConnected()).toBe(true);
});
it('should transition to STOPPING when stopRecording() is called', async () => {
// Setup: connect only. NOTE(review): this helper leaves the client CONNECTED,
// not READY — verify stopRecording() is legal from CONNECTED before un-skipping.
await setupConnectedClient();
// Call stopRecording
const stopPromise = client.stopRecording();
expect(client.getState()).toBe(ClientState.STOPPING);
// Simulate the final transcript that lets stopRecording() resolve.
const messageHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message')[1];
const finalMessage = JSON.stringify({
v: 1,
type: 'message',
data: {
type: 'Transcription',
finalTranscript: 'test',
is_finished: true
}
});
messageHandler(finalMessage);
await stopPromise;
expect(client.getState()).toBe(ClientState.STOPPED);
});
it('should transition to FAILED on connection error', async () => {
const connectPromise = client.connect();
// Simulate error
const errorHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'error')[1];
errorHandler(new Error('Connection failed'));
try {
await connectPromise;
} catch (e) {
// Expected: connect() should reject on connection error.
}
expect(client.getState()).toBe(ClientState.FAILED);
});
});
// NOTE(review): suite is skipped — see State Management note on handler wiring.
describe.skip('Connection Handling', () => {
  it('should handle duplicate connect() calls', async () => {
    // Call connect twice
    const promise1 = client.connect();
    const promise2 = client.connect();
    // Should be the same promise (the client must dedupe in-flight connects)
    expect(promise1).toBe(promise2);
    // Simulate successful connection
    mockWs.readyState = MockWebSocket.OPEN;
    const openHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'open')[1];
    openHandler();
    await Promise.all([promise1, promise2]);
    expect(client.getState()).toBe(ClientState.CONNECTED);
  });
  it('should not reconnect if already connected', async () => {
    // First connection
    await setupConnectedClient();
    // Try to connect again
    await client.connect();
    // Should not create a new WebSocket: the mocked constructor must have run
    // exactly once. (Removed an unused `firstWs` local that captured mockWs
    // but was never read.)
    expect(MockWebSocket).toHaveBeenCalledTimes(1);
    expect(client.isConnected()).toBe(true);
  });
});
// NOTE(review): suite is skipped. Expectations assume sendAudio() buffers chunks
// client-side until READY and counts them in getStats() — confirm against the
// client implementation before un-skipping.
describe.skip('Audio Handling', () => {
it('should buffer audio when not ready', () => {
const audioData = Buffer.from([1, 2, 3, 4]);
client.sendAudio(audioData);
const stats = client.getStats();
// Nothing on the wire yet; the chunk should sit in the client-side buffer.
expect(stats.audioBytesSent).toBe(0);
expect(stats.audioChunksBuffered).toBe(1);
expect(stats.currentBufferedChunks).toBe(1);
});
it('should send audio immediately when ready', async () => {
await setupReadyClient();
const audioData = Buffer.from([1, 2, 3, 4]);
client.sendAudio(audioData);
// Should have sent the audio
expect(mockWs.send).toHaveBeenCalled();
const stats = client.getStats();
expect(stats.audioBytesSent).toBe(4);
expect(stats.audioChunksSent).toBe(1);
});
it('should flush buffered audio when becoming ready', async () => {
// Buffer some audio while disconnected
const audioData1 = Buffer.from([1, 2, 3, 4]);
const audioData2 = Buffer.from([5, 6, 7, 8]);
client.sendAudio(audioData1);
client.sendAudio(audioData2);
// Verify buffered
let stats = client.getStats();
expect(stats.currentBufferedChunks).toBe(2);
expect(stats.audioBytesSent).toBe(0);
// Connect and become ready
await setupReadyClient();
// Should have flushed the buffer
stats = client.getStats();
expect(stats.audioBytesSent).toBe(8);
expect(stats.audioChunksSent).toBe(2);
expect(stats.currentBufferedChunks).toBe(0);
});
it('should detect buffer overflow', () => {
// Fill buffer to capacity (simulate overflow)
const chunkSize = 1024;
const maxChunks = 6000; // Default buffer size
// `<=` deliberately sends maxChunks + 1 chunks: one more than the buffer holds.
for (let i = 0; i <= maxChunks; i++) {
client.sendAudio(Buffer.alloc(chunkSize));
}
expect(client.isBufferOverflowing()).toBe(true);
const stats = client.getStats();
expect(stats.bufferOverflowCount).toBeGreaterThan(0);
});
});
// NOTE(review): suite is skipped.
describe.skip('Helper Methods', () => {
it('should report correct connection status', async () => {
expect(client.isConnected()).toBe(false);
await setupConnectedClient();
expect(client.isConnected()).toBe(true);
await setupReadyClient();
expect(client.isConnected()).toBe(true);
// Disconnect: fire the client's registered 'close' listener directly.
const closeHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'close')[1];
closeHandler(1000, 'Normal closure');
expect(client.isConnected()).toBe(false);
});
it('should track statistics correctly', async () => {
await setupReadyClient();
// Send some audio
client.sendAudio(Buffer.alloc(100));
client.sendAudio(Buffer.alloc(200));
client.sendAudio(Buffer.alloc(300));
const stats = client.getStats();
expect(stats.audioBytesSent).toBe(600);
expect(stats.audioChunksSent).toBe(3);
// NOTE(review): expecting audioChunksBuffered === 3 while READY implies every
// chunk passes through the buffer even when sent immediately — verify.
expect(stats.audioChunksBuffered).toBe(3);
});
});
// NOTE(review): suite is skipped.
describe.skip('Memory Management', () => {
it('should clear ring buffer on disconnect', async () => {
// Buffer some audio
client.sendAudio(Buffer.alloc(100));
client.sendAudio(Buffer.alloc(200));
let stats = client.getStats();
expect(stats.currentBufferedChunks).toBe(2);
// Connect then disconnect
await setupConnectedClient();
const closeHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'close')[1];
closeHandler(1000, 'Normal closure');
// Buffer should be cleared
stats = client.getStats();
expect(stats.currentBufferedChunks).toBe(0);
});
it('should cleanup WebSocket listeners on close', async () => {
await setupConnectedClient();
// NOTE(review): elsewhere stopRecording() only resolves after a final
// transcript message arrives; awaiting it here with no simulated server
// response may hang — confirm before un-skipping.
await client.stopRecording();
expect(mockWs.removeAllListeners).toHaveBeenCalled();
expect(mockWs.close).toHaveBeenCalled();
});
});
// NOTE(review): suite is skipped. Every test below rebuilds `client` WITHOUT an
// asrRequestConfig, unlike all other suites — confirm the config is genuinely
// optional (or add it) before un-skipping.
describe.skip('Message Handling', () => {
it('should handle transcription messages', async () => {
const onTranscript = jest.fn();
client = new RealTimeTwoWayWebSocketRecognitionClient({
url: 'ws://test.example.com/recognize',
onTranscript,
});
await setupConnectedClient();
// Feed a server-shaped Transcription envelope straight into the registered handler.
const messageHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message')[1];
const transcriptMessage = JSON.stringify({
v: 1,
type: 'message',
data: {
type: 'Transcription',
finalTranscript: 'Hello world',
finalTranscriptConfidence: 0.95,
is_finished: false
}
});
messageHandler(transcriptMessage);
expect(onTranscript).toHaveBeenCalledWith(expect.objectContaining({
finalTranscript: 'Hello world',
finalTranscriptConfidence: 0.95,
is_finished: false
}));
});
it('should handle function call messages', async () => {
const onFunctionCall = jest.fn();
client = new RealTimeTwoWayWebSocketRecognitionClient({
url: 'ws://test.example.com/recognize',
onFunctionCall,
});
await setupConnectedClient();
const messageHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message')[1];
const functionCallMessage = JSON.stringify({
v: 1,
type: 'message',
data: {
type: 'FunctionCall',
audioUtteranceId: 'test-id',
functionName: 'playMusic',
functionArgJson: '{"song": "Bohemian Rhapsody"}'
}
});
messageHandler(functionCallMessage);
expect(onFunctionCall).toHaveBeenCalledWith(expect.objectContaining({
type: 'FunctionCall',
audioUtteranceId: 'test-id',
functionName: 'playMusic',
functionArgJson: '{"song": "Bohemian Rhapsody"}'
}));
});
it('should handle metadata messages', async () => {
const onMetadata = jest.fn();
client = new RealTimeTwoWayWebSocketRecognitionClient({
url: 'ws://test.example.com/recognize',
onMetadata,
});
await setupConnectedClient();
const messageHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message')[1];
const metadataMessage = JSON.stringify({
v: 1,
type: 'message',
data: {
type: 'Metadata',
audioUtteranceId: 'test-id',
duration: 1000
}
});
messageHandler(metadataMessage);
expect(onMetadata).toHaveBeenCalledWith(expect.objectContaining({
audioUtteranceId: 'test-id',
duration: 1000
}));
});
it('should handle error messages', async () => {
const onError = jest.fn();
client = new RealTimeTwoWayWebSocketRecognitionClient({
url: 'ws://test.example.com/recognize',
onError,
});
await setupConnectedClient();
const messageHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message')[1];
const errorMessage = JSON.stringify({
v: 1,
type: 'message',
data: {
type: 'Error',
message: 'Something went wrong'
}
});
messageHandler(errorMessage);
// The server's Error payload should surface to onError as a real Error object.
expect(onError).toHaveBeenCalledWith(expect.any(Error));
expect(onError.mock.calls[0][0].message).toContain('Something went wrong');
});
it('should handle primitive message data without crashing', async () => {
const onError = jest.fn();
client = new RealTimeTwoWayWebSocketRecognitionClient({
url: 'ws://test.example.com/recognize',
onError,
});
await setupConnectedClient();
const messageHandler = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message')[1];
// Test with various primitive values in the `data` slot (malformed envelopes).
const primitiveMessages = [
JSON.stringify({ v: 1, type: 'test', data: 'string' }),
JSON.stringify({ v: 1, type: 'test', data: 123 }),
JSON.stringify({ v: 1, type: 'test', data: true }),
JSON.stringify({ v: 1, type: 'test', data: null }),
];
// Should not throw
expect(() => {
primitiveMessages.forEach(msg => messageHandler(msg));
}).not.toThrow();
// Should log errors for primitives
expect(onError).toHaveBeenCalled();
});
});
describe('Blob Audio Handling', () => {
  it('should accept Blob as audio input', async () => {
    const audioData = new Uint8Array([1, 2, 3, 4]);
    const blob = new Blob([audioData], { type: 'audio/raw' });
    // Should not throw
    expect(() => client.sendAudio(blob)).not.toThrow();
  });

  it('should convert Blob to ArrayBuffer before buffering', async () => {
    const audioData = new Uint8Array([1, 2, 3, 4]);
    const blob = new Blob([audioData], { type: 'audio/raw' });
    client.sendAudio(blob);
    // Wait for async conversion
    await new Promise(resolve => setTimeout(resolve, 100));
    const stats = client.getStats();
    // Should have buffered the converted data
    expect(stats.currentBufferedChunks).toBeGreaterThan(0);
  });

  it('should handle empty Blob', async () => {
    const blob = new Blob([], { type: 'audio/raw' });
    client.sendAudio(blob);
    // Wait for async conversion
    await new Promise(resolve => setTimeout(resolve, 100));
    const stats = client.getStats();
    // Empty blob should not be buffered
    expect(stats.currentBufferedChunks).toBe(0);
  });

  it('should handle large Blob', async () => {
    const largeData = new Uint8Array(1024 * 1024); // 1MB
    for (let i = 0; i < largeData.length; i++) {
      largeData[i] = i % 256;
    }
    const blob = new Blob([largeData], { type: 'audio/raw' });
    client.sendAudio(blob);
    // Wait for async conversion
    await new Promise(resolve => setTimeout(resolve, 100));
    const stats = client.getStats();
    // A single large Blob should still land as exactly one buffered chunk.
    expect(stats.currentBufferedChunks).toBe(1);
  });

  it('should handle multiple Blobs in sequence', async () => {
    const blob1 = new Blob([new Uint8Array([1, 2, 3, 4])], { type: 'audio/raw' });
    const blob2 = new Blob([new Uint8Array([5, 6, 7, 8])], { type: 'audio/raw' });
    const blob3 = new Blob([new Uint8Array([9, 10, 11, 12])], { type: 'audio/raw' });
    client.sendAudio(blob1);
    client.sendAudio(blob2);
    client.sendAudio(blob3);
    // Wait for all async conversions
    await new Promise(resolve => setTimeout(resolve, 200));
    const stats = client.getStats();
    expect(stats.currentBufferedChunks).toBe(3);
  });

  it('should handle mixed Blob and ArrayBuffer inputs', async () => {
    const blob = new Blob([new Uint8Array([1, 2, 3, 4])], { type: 'audio/raw' });
    const arrayBuffer = new ArrayBuffer(4);
    const view = new Uint8Array(arrayBuffer);
    view.set([5, 6, 7, 8]);
    client.sendAudio(blob);
    client.sendAudio(arrayBuffer);
    // Wait for Blob conversion (the ArrayBuffer path is synchronous)
    await new Promise(resolve => setTimeout(resolve, 100));
    const stats = client.getStats();
    expect(stats.currentBufferedChunks).toBe(2);
  });

  it('should log error if Blob conversion fails', async () => {
    const mockLogger = jest.fn();
    const testClient = new RealTimeTwoWayWebSocketRecognitionClient({
      url: 'ws://test.example.com/recognize',
      logger: mockLogger
    });
    // Create a real Blob but spy on arrayBuffer to make it fail
    const audioData = new Uint8Array([1, 2, 3, 4]);
    const badBlob = new Blob([audioData], { type: 'audio/raw' });
    // Mock arrayBuffer to reject (per-instance spy, so other Blobs are unaffected)
    jest.spyOn(badBlob, 'arrayBuffer').mockRejectedValue(new Error('Conversion failed'));
    testClient.sendAudio(badBlob);
    // Wait for error handling
    await new Promise(resolve => setTimeout(resolve, 100));
    // Should have logged an error
    const errorCalls = mockLogger.mock.calls.filter(call => call[0] === 'error');
    expect(errorCalls.length).toBeGreaterThan(0);
  });

  // Note: The blobToArrayBuffer() function has dual-path support:
  // - Modern browsers: Uses blob.arrayBuffer() [Chrome 76+, Safari 14+]
  // - Older Smart TVs: Falls back to FileReader [Tizen 2018-2019, webOS 3.0-4.x]
  it('should use blob.arrayBuffer() when available (modern path)', async () => {
    const audioData = new Uint8Array([1, 2, 3, 4]);
    const blob = new Blob([audioData], { type: 'audio/raw' });
    // Spy on blob.arrayBuffer to verify it's called
    const arrayBufferSpy = jest.spyOn(blob, 'arrayBuffer');
    client.sendAudio(blob);
    // Wait for async conversion
    await new Promise(resolve => setTimeout(resolve, 100));
    // Should have used modern blob.arrayBuffer()
    expect(arrayBufferSpy).toHaveBeenCalled();
    // Should have buffered successfully
    const stats = client.getStats();
    expect(stats.currentBufferedChunks).toBe(1);
  });

  it('should use FileReader fallback when blob.arrayBuffer not available (Smart TV path)', async () => {
    const audioData = new Uint8Array([1, 2, 3, 4]);
    // Create a real Blob
    const blob = new Blob([audioData], { type: 'audio/raw' });
    // Mock FileReader BEFORE removing arrayBuffer
    const mockReadAsArrayBuffer = jest.fn();
    const originalFileReader = (global as any).FileReader;
    // Fix: wrap the whole body in try/finally so the real FileReader is
    // restored even when an expect() above the old cleanup line throws.
    // Previously a failing assertion left the global stubbed for every
    // subsequent test in the file.
    try {
      (global as any).FileReader = jest.fn().mockImplementation(() => ({
        readAsArrayBuffer: mockReadAsArrayBuffer,
        onload: null,
        onerror: null,
        result: audioData.buffer
      }));
      // Trigger FileReader path by simulating onload after a delay
      mockReadAsArrayBuffer.mockImplementation(function(this: any) {
        setTimeout(() => {
          if (this.onload) {
            this.result = audioData.buffer;
            this.onload();
          }
        }, 10);
      });
      // Remove arrayBuffer method to simulate old Smart TV (must be done after blob creation)
      Object.defineProperty(blob, 'arrayBuffer', {
        value: undefined,
        writable: true,
        configurable: true
      });
      client.sendAudio(blob);
      // Wait for FileReader async conversion
      await new Promise(resolve => setTimeout(resolve, 150));
      // Should have used FileReader
      expect((global as any).FileReader).toHaveBeenCalled();
      expect(mockReadAsArrayBuffer).toHaveBeenCalledWith(blob);
      // Should have buffered successfully
      const stats = client.getStats();
      expect(stats.currentBufferedChunks).toBe(1);
    } finally {
      // Cleanup: always restore the real FileReader
      (global as any).FileReader = originalFileReader;
    }
  });
});
describe('Debug Logging', () => {
  // Builds a client wired to a jest.fn() logger, optionally carrying a
  // debugCommand in the ASR config ('as any' because the field is newer than
  // the declared type). Returns both so tests can inspect logged levels.
  const buildLoggedClient = (debugCommand?: { enableDebugLog: boolean }) => {
    const mockLogger = jest.fn();
    const asrRequestConfig: any = {
      provider: 'deepgram',
      language: 'en',
      sampleRate: 16000,
      encoding: 'linear16',
    };
    if (debugCommand) {
      asrRequestConfig.debugCommand = debugCommand;
    }
    const testClient = new RealTimeTwoWayWebSocketRecognitionClient({
      url: 'ws://test.example.com/recognize',
      asrRequestConfig,
      logger: mockLogger,
    });
    return { testClient, mockLogger };
  };

  it('should not log debug messages when debug logging is disabled (default)', () => {
    const { testClient, mockLogger } = buildLoggedClient();
    // Trigger some actions that would normally log debug messages
    expect(testClient.getState()).toBe(ClientState.INITIAL);
    // Debug logs should not be called
    const debugCalls = mockLogger.mock.calls.filter(call => call[0] === 'debug');
    expect(debugCalls.length).toBe(0);
    // But other log levels should work
    const nonDebugCalls = mockLogger.mock.calls.filter(call => call[0] !== 'debug');
    // May or may not have non-debug logs, just checking we can track them
    expect(nonDebugCalls.length).toBeGreaterThanOrEqual(0);
  });

  it('should log debug messages when enableDebugLog is true in debugCommand', () => {
    const { testClient } = buildLoggedClient({ enableDebugLog: true });
    // Note: Debug logging is enabled in onConnected() when ASR request is sent
    // So we need to test after connection, but for now we verify the config is accepted
    expect(testClient.getAudioUtteranceId()).toBeDefined();
  });

  it('should respect debugCommand.enableDebugLog flag', () => {
    // Client with debug logging explicitly disabled
    const { testClient: clientNoDebug, mockLogger } = buildLoggedClient({ enableDebugLog: false });
    expect(clientNoDebug.getAudioUtteranceId()).toBeDefined();
    // Debug logs should not be called
    const debugCalls = mockLogger.mock.calls.filter(call => call[0] === 'debug');
    expect(debugCalls.length).toBe(0);
  });
});
// Helper functions
// Drives the shared `client` through connect(): flips the mock socket to OPEN
// and fires the 'open' listener the client registered, so connect() resolves.
async function setupConnectedClient() {
  const connectPromise = client.connect();
  mockWs.readyState = MockWebSocket.OPEN;
  const registeredOpen = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'open');
  if (registeredOpen === undefined) {
    throw new Error('No "open" event handler registered on mockWs');
  }
  registeredOpen[1]();
  await connectPromise;
}
// Brings the shared `client` to CONNECTED, then simulates the server's
// "ready_for_uploading_recording" control message so it reaches READY.
async function setupReadyClient() {
  await setupConnectedClient();
  // Guard against a missing handler for a clear failure message, consistent
  // with setupConnectedClient (previously an unchecked `.find(...)[1]` that
  // would throw an opaque TypeError).
  const messageCall = mockWs.on.mock.calls.find((call: any[]) => call[0] === 'message');
  if (!messageCall) {
    throw new Error('No "message" event handler registered on mockWs');
  }
  const messageHandler = messageCall[1];
  const readyMessage = JSON.stringify({
    v: 1,
    type: 'message',
    data: {
      type: 'ClientControlMessage',
      action: 'ready_for_uploading_recording',
      audioUtteranceId: 'test-utterance-id'
    }
  });
  messageHandler(readyMessage);
}
describe('stopAbnormally', () => {
beforeEach(() => {
// Create fresh mock WebSocket.
// NOTE(review): near-duplicate of the outer beforeEach, but `on` chains via
// mockReturnThis() here — consider consolidating the two setups.
mockWs = {
readyState: MockWebSocket.CONNECTING,
send: jest.fn(),
close: jest.fn(),
on: jest.fn().mockReturnThis(),
removeAllListeners: jest.fn(),
};
// Mock WebSocket constructor
(MockWebSocket as any).mockImplementation(() => mockWs);
// Create fresh client
client = new RealTimeTwoWayWebSocketRecognitionClient({
url: 'ws://localhost:3000',
asrRequestConfig: {
provider: 'deepgram',
language: 'en',
sampleRate: 16000,
encoding: 'linear16'
},
onTranscript: jest.fn(),
onError: jest.fn(),
onConnected: jest.fn(),
onDisconnected: jest.fn()
});
});
// Skipped: depends on setupReadyClient, which requires real handler wiring.
it.skip('should immediately close WebSocket connection', async () => {
await setupReadyClient();
expect(client.getState()).toBe(ClientState.READY);
client.stopAbnormally();
expect(mockWs.close).toHaveBeenCalledWith(1000, 'Client abnormal stop');
});
it.skip('should update state to STOPPED', async () => {
await setupReadyClient();
client.stopAbnormally();
expect(client.getState()).toBe(ClientState.STOPPED);
});
it('should work from any state', () => {
// Test from INITIAL state: no connection needed for an abnormal stop.
expect(client.getState()).toBe(ClientState.INITIAL);
client.stopAbnormally();
expect(client.getState()).toBe(ClientState.STOPPED);
});
it.skip('should clean up resources', async () => {
await setupReadyClient();
// Send some audio to populate buffers
client.sendAudio(new ArrayBuffer(1000));
// Verify audio was sent
const statsBefore = client.getStats();
expect(statsBefore.audioBytesSent).toBeGreaterThan(0);
client.stopAbnormally();
// Cleanup resets stats
const statsAfter = client.getStats();
expect(statsAfter.audioBytesSent).toBe(0);
expect(statsAfter.audioChunksSent).toBe(0);
});
it.skip('should not send stop signal to server (immediate disconnect)', async () => {
await setupReadyClient();
jest.clearAllMocks(); // Clear connection setup messages
client.stopAbnormally();
// Should NOT send stop recording signal (unlike stopRecording)
// Only closes the WebSocket
expect(mockWs.send).not.toHaveBeenCalled();
expect(mockWs.close).toHaveBeenCalled();
});
it.skip('should differ from stopRecording behavior', async () => {
// stopAbnormally does NOT send stop signal (unlike stopRecording which sends STOP_RECORDING signal)
// This is verified by the previous test "should not send stop signal to server"
// This test verifies stopAbnormally doesn't wait for server response
await setupReadyClient();
// Call stopAbnormally
client.stopAbnormally();
// State should immediately be STOPPED (not STOPPING)
expect(client.getState()).toBe(ClientState.STOPPED);
// This is different from stopRecording which would be STOPPING and waiting for server
});
it('should be idempotent - safe to call multiple times', () => {
client.stopAbnormally();
expect(client.getState()).toBe(ClientState.STOPPED);
// Call again - should not throw
expect(() => client.stopAbnormally()).not.toThrow();
expect(client.getState()).toBe(ClientState.STOPPED);
});
});
});