@sign-speak/react-sdk
Unlock Sign Language Recognition, Avatar, and Speech Recognition.
TypeScript
import { RecognitionResult } from "./rest";
export interface SignLanguageRecognitionWebsocketConfig {
    /** Length of each streamed video slice; streamVideoSegments() paces its sends by this value. */
    sliceLength?: number;
    singleRecognitionMode?: boolean;
    model?: string;
    /** Optional MediaDevice id; when set, streamLiveVideo() captures from this device. */
    deviceId?: string;
    [key: string]: any;
}
export declare class SignSpeakWebSocket {
    private config;
    private socket;
    private mediaRecorder;
    private stream;
    private isConnected;
    private onPrediction;
    constructor(config: SignLanguageRecognitionWebsocketConfig, onPrediction: (prediction: RecognitionResult) => void);
    /**
     * Initializes the WebSocket connection.
     */
    connect(): Promise<void>;
    /**
     * Handles incoming messages from the WebSocket.
     */
    private handleMessage;
    /**
     * Streams live video from a MediaDevice.
     * If a deviceId is specified in the config, it will be used.
     */
    streamLiveVideo(): Promise<void>;
    /**
     * Streams video segments over the WebSocket asynchronously.
     * Accepts an async iterable (or async generator) that yields video segments
     * (File or Blob) one at a time; each segment is sent over the WebSocket with
     * a delay matching sliceLength.
     *
     * Use this instead of streamLiveVideo() when you want to supply your own
     * video segments asynchronously.
     *
     * @param videoStream An async iterable of video segments (File or Blob).
     */
    streamVideoSegments(videoStream: AsyncIterable<File | Blob>): Promise<void>;
    /**
     * Stops the streaming process and sends a termination signal.
     * @param closeSocket If true, the WebSocket connection is also closed.
     */
    stopStreaming(closeSocket?: boolean): Promise<void>;
    /**
     * Disconnects the WebSocket.
     */
    disconnect(): void;
}
//# sourceMappingURL=websockets.d.ts.map
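
A minimal live-camera usage sketch based on the declarations above. It assumes SignSpeakWebSocket is exported from the package root (@sign-speak/react-sdk) and that sliceLength is a duration in milliseconds; the config values are placeholders, and the prediction callback simply logs the RecognitionResult it receives.

TypeScript
import { SignSpeakWebSocket } from "@sign-speak/react-sdk";

// Placeholder config: sliceLength unit assumed to be milliseconds.
const ws = new SignSpeakWebSocket(
    { sliceLength: 1000, singleRecognitionMode: false },
    (prediction) => {
        // RecognitionResult is defined in the SDK's rest module; just log it here.
        console.log("prediction", prediction);
    }
);

async function startRecognition(): Promise<void> {
    await ws.connect();         // open the WebSocket connection
    await ws.streamLiveVideo(); // capture from the camera (or the configured deviceId) and stream it
}

async function stopRecognition(): Promise<void> {
    await ws.stopStreaming();   // send the termination signal
    ws.disconnect();            // close the WebSocket
}

Passing true to stopStreaming() closes the socket in the same call, which makes the separate disconnect() unnecessary.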
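
The streamVideoSegments() path can be sketched with an async generator. The segment source below is hypothetical; anything that yields File or Blob values works, and per the comment above the SDK paces the sends by sliceLength.

TypeScript
import { SignSpeakWebSocket } from "@sign-speak/react-sdk";

// Hypothetical segment source: yields pre-recorded Blob slices one at a time.
async function* segmentSource(segments: Blob[]): AsyncGenerator<Blob> {
    for (const segment of segments) {
        yield segment;
    }
}

const ws = new SignSpeakWebSocket(
    { sliceLength: 1000, singleRecognitionMode: true }, // placeholder values
    (prediction) => console.log(prediction)
);

async function recognizeFromSegments(segments: Blob[]): Promise<void> {
    await ws.connect();
    await ws.streamVideoSegments(segmentSource(segments));
    await ws.stopStreaming(true); // terminate and close the socket in one call
}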