inference-server
Libraries and a server for building AI applications. Adapters to various native bindings enable local inference. Integrate it into your application, or run it as a microservice.
TypeScript
import { ModelEngine, SpeechToTextTaskArgs, ToolDefinition } from '../types/index.js';
import { CustomEngine } from '../engines/index.js';
type EngineArgs = {
    speechToTextModel: string;
    chatModel: string;
    tools: Record<string, ToolDefinition>;
};
/** Engine that transcribes speech and hands the transcription to a chat model with tool (function) calling. */
export declare class VoiceFunctionCallEngine extends CustomEngine implements ModelEngine {
    /** Identifier of the model used for speech-to-text transcription. */
    speechToTextModel: string;
    /** Identifier of the chat model that consumes the transcription and may call tools. */
    chatModel: string;
    /** Tools (functions) available to the chat model, keyed by tool name. */
    tools: Record<string, ToolDefinition>;
    constructor({ speechToTextModel, chatModel, tools }: EngineArgs);
    /** Transcribes the audio described by the task and returns the raw transcription. */
    createTranscription(task: SpeechToTextTaskArgs): Promise<string>;
    /** Runs a speech-to-text task end to end and returns the resulting text. */
    processSpeechToTextTask(task: SpeechToTextTaskArgs): Promise<{
        text: string;
    }>;
}
export {};
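For context, a minimal usage sketch of the class declared above. It assumes the package exports VoiceFunctionCallEngine from its root entry point, and the model ids, tool definition fields, and the argument passed to processSpeechToTextTask are illustrative assumptions, not part of the declaration.

TypeScript
import { VoiceFunctionCallEngine } from 'inference-server'; // import path assumed

const engine = new VoiceFunctionCallEngine({
    speechToTextModel: 'whisper-base',      // assumed speech-to-text model id
    chatModel: 'llama-3-8b-instruct',       // assumed chat model id
    tools: {
        // Hypothetical tool; the real ToolDefinition shape lives in the package's types.
        getWeather: {
            description: 'Look up the current weather for a city',
            parameters: { type: 'object', properties: { city: { type: 'string' } } },
            handler: async ({ city }: { city: string }) => ({ city, tempC: 21 }),
        } as any, // cast because the exact ToolDefinition fields are not shown above
    },
});

// Run a speech-to-text task; the argument shape (a reference to an audio file) is an assumption.
const { text } = await engine.processSpeechToTextTask({ file: 'question.wav' } as any);
console.log(text);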