primvoices-react
Version:
React client for the PrimVoices Agents API
199 lines (198 loc) • 5.8 kB
TypeScript
/**
* WebSocketClient
*
* A client for handling WebSocket communications with the PrimVoices TTS API.
*
* This client supports:
* - Establishing WebSocket connections
* - Sending audio data from microphone
* - Receiving and processing audio responses
* - Managing the lifecycle of an audio conversation
*/
/**
 * Configuration accepted by the {@link WebSocketClient} constructor.
 */
export interface WebSocketClientConfig {
/** Identifier of the agent to converse with (required). */
agentId: string;
/** Optional function identifier — presumably selects a specific agent function; confirm against the API. */
functionId?: string;
/** Optional environment name (e.g. staging vs production) — TODO confirm accepted values. */
environment?: string;
/** Processing strategy: "cascade" or "sts" (speech-to-speech) — NOTE(review): semantics assumed from names; verify. */
strategy?: "cascade" | "sts";
/** Minimum severity of client-side log output. */
logLevel?: "DEBUG" | "INFO" | "WARN" | "ERROR";
/** Override for the WebSocket server URL. */
serverUrl?: string;
/** Override for the HTTP API URL (used by getAgentConfiguration — presumably). */
apiUrl?: string;
/** Extra string parameters forwarded to the agent. */
customParameters?: Record<string, string>;
/** Opt into canary behavior — TODO confirm what the server does with this flag. */
canary?: boolean;
/** Identifies the client origin; defaults are not visible here. */
origin?: "debugger" | "web";
}
/**
 * A single debug event received from the server and queued by the client.
 */
export interface DebugMessage {
/** Message type discriminator — exact set of values not visible here. */
type: string;
/** Conversation turn number this message belongs to. */
turn: number;
/** Name of the debug event. */
name: string;
/** Arbitrary structured payload attached to the event. */
data: Record<string, unknown>;
}
/**
 * Audio statistics containing level and speech detection information.
 * isPlayback indicates whether these stats are for playback audio (true) or microphone input (false).
 */
export interface AudioStats {
/** Current audio level (volume) — range/scale not visible here; see getAudioLevel(). */
level: number;
/** Whether speech is currently detected in the monitored signal. */
isSpeaking: boolean;
/** True when the stats describe playback audio; false/absent for microphone input. */
isPlayback?: boolean;
}
/** Callback invoked with a chunk of raw audio samples. */
export type AudioDataCallback = (audioData: Float32Array) => void;
/** Callback invoked with periodic audio level / speech-detection stats. */
export type AudioStatsCallback = (stats: AudioStats) => void;
/** Callback invoked with a batch of debug messages received from the server. */
export type DebugMessageCallback = (messages: DebugMessage[]) => void;
/** Parameterless callback used for simple lifecycle/status events. */
export type StatusCallback = () => void;
/**
 * Client for the PrimVoices agent WebSocket API: manages the socket connection,
 * microphone capture, playback scheduling, and debug/stats callbacks.
 */
export declare class WebSocketClient {
/** Underlying WebSocket connection. */
private socket;
/** Web Audio context used for capture and playback. */
private audioContext;
/** Microphone MediaStream obtained from getUserMedia — presumably; confirm in implementation. */
private mediaStream;
/** Audio node wrapping the microphone stream. */
private microphoneSource;
/** AudioWorklet node that processes captured audio. */
private audioWorklet;
/** Analyser node used for level metering (see getAudioLevel). */
private analyser;
/** Queue of received audio chunks awaiting playback. */
private audioQueue;
/** The audio source currently playing, if any. */
private currentAudioSource;
/** Buffered debug messages (see clearDebugQueue / onDebugMessage). */
private debugQueue;
/** Whether the audio worklet module has been loaded/initialized. */
private workletInitialized;
/** True while microphone capture is active. */
private isListening;
/** True while the WebSocket connection is open. */
private isConnected;
/** True while playback audio is being produced. */
private isPlaying;
/** Call session identifier — NOTE(review): Twilio-style naming; confirm source. */
private callSid;
/** Stream identifier for the current media stream — same caveat as callSid. */
private streamSid;
/** Configuration passed to the constructor. */
private config;
/** Most recent speech-detection state for stats reporting. */
private speechDetected;
/** Timer handle driving periodic audio stats monitoring. */
private statsInterval;
/** Agent ID from the original config — presumably kept to detect redirects. */
private initialAgentId;
/** Environment from the original config, if provided. */
private initialEnvironment?;
/** Whether the connection has been redirected away from the initial agent. */
private redirected;
/** Audio sources already scheduled for future playback. */
private scheduledSources;
/** AudioContext time at which the next chunk will be scheduled. */
private nextPlaybackTime;
/** Timers that fire mark events aligned with scheduled audio. */
private scheduledMarkTimers;
/** Timer driving scheduleFromQueue. */
private scheduleTimer;
/** Minimum seconds of audio to buffer before starting playback. */
private readonly minPrebufferSeconds;
/** How far ahead (seconds) audio is scheduled contiguously. */
private readonly scheduleHorizonSeconds;
/** User callback: connection opened. */
private onConnectionOpen;
/** User callback: connection closed. */
private onConnectionClose;
/** User callback: connection error. */
private onConnectionError;
/** User callback: microphone capture started. */
private onStartListening;
/** User callback: microphone capture stopped. */
private onStopListening;
/** User callback: playback started. */
private onPlayStart;
/** User callback: playback stopped. */
private onPlayStop;
/** User callback: periodic audio stats. */
private onAudioStats;
/** User callback: debug message batch received. */
private onDebugMessage;
/** Key of the last redirect handled — presumably used to de-duplicate redirects; confirm. */
private lastRedirectKey;
constructor(config: WebSocketClientConfig);
/**
 * Set callbacks for different events.
 * All callbacks are optional; unspecified ones are left unchanged or unset —
 * TODO confirm replace-vs-merge semantics in the implementation.
 */
setCallbacks({ onOpen, onClose, onError, onListeningStart, onListeningStop, onAudioStart, onAudioStop, onAudioStats, onDebugMessage, }: {
onOpen?: StatusCallback;
onClose?: StatusCallback;
onError?: StatusCallback;
onListeningStart?: StatusCallback;
onListeningStop?: StatusCallback;
onAudioStart?: StatusCallback;
onAudioStop?: StatusCallback;
onAudioStats?: AudioStatsCallback;
onDebugMessage?: DebugMessageCallback;
}): void;
/**
 * Resolve the agent's connection details (server URL and parameters) —
 * presumably via an HTTP request to apiUrl; confirm in implementation.
 */
getAgentConfiguration(): Promise<{
url: string;
parameters: Record<string, string>;
}>;
/**
 * Initialize the WebSocket connection
 */
connect(): Promise<void>;
/**
 * Handle audio data received from the server
 */
private handleAudioMessage;
/** Handle a server "clear" message — presumably flushes queued/playing audio; confirm. */
private handleClearMessage;
/**
 * Handle mark events received from the server
 * These marks will be associated with the next audio chunk received
 */
private handleMarkMessage;
/**
 * Handle debug messages received from the server
 */
private handleDebugMessage;
/** Handle server control messages (e.g. redirects) — exact protocol not visible here. */
private handleControlMessage;
/**
 * Initialize the audio context and related components
 */
private initAudioContext;
/**
 * Start capturing audio from the microphone and sending it to the server
 */
startListening(): Promise<void>;
/**
 * Stop capturing audio from the microphone
 */
stopListening(): void;
/**
 * Send a text message to the server
 */
sendTextEvent(text: string): void;
/**
 * Close the WebSocket connection and clean up resources
 */
disconnect(): void;
/**
 * Process captured audio data and send it to the server
 */
private processAudioData;
/**
 * Convert array buffer to base64
 */
private arrayBufferToBase64;
/**
 * Downsample audio buffer - matches audio.ts implementation
 */
private downsampleBuffer;
/**
 * Add audio data to the playback queue
 */
private addToAudioQueue;
/**
 * Clear the audio playback queue and stop any current playback
 */
private clearAudioQueue;
/**
 * Clear the debug queue
 */
private clearDebugQueue;
/**
 * Play the next audio chunk in the queue
 */
private playNextInQueue;
/**
 * Compute total buffered audio seconds currently in the queue (excluding marks)
 */
private getBufferedSecondsInQueue;
/**
 * Ensure playback is started and enough audio is scheduled ahead to avoid gaps
 */
private schedulePlayback;
/**
 * Schedule queued items contiguously at nextPlaybackTime up to a horizon
 */
private scheduleFromQueue;
/**
 * Get the current audio level (volume) from the analyzer
 * This works for both microphone input and audio playback, depending on what's currently active
 */
getAudioLevel(): number;
/**
 * Start monitoring audio levels for speech detection
 */
private startAudioStatsMonitoring;
/**
 * Stop audio stats monitoring
 */
private stopAudioStatsMonitoring;
/**
 * Utility methods to check current state
 */
isCurrentlyConnected(): boolean;
isCurrentlyListening(): boolean;
isCurrentlyPlaying(): boolean;
}