inference-server
Version:
Libraries and a server for building AI applications. Includes adapters to various native bindings, enabling local inference. Integrate the libraries into your application, or run the server as a microservice.
5 lines (4 loc) • 611 B
TypeScript
import { TransformersJsModel, TransformersJsSpeechModel } from '../../types/index.js';
import { TransformersJsModelConfig, TransformersJsModelComponents, SpeechModelComponents } from './engine.js';
/**
 * Loads the runtime components for a Transformers.js model.
 *
 * NOTE(review): this is a declaration only — the implementation is not
 * visible here. Presumably it resolves and instantiates the model's
 * components (tokenizer/model/processor) per `config`; confirm in engine.ts.
 *
 * @param modelOpts - Model options. May be a plain `TransformersJsModel`, or
 *   one that also carries `TransformersJsSpeechModel` options (intersection
 *   type) when speech capabilities are configured.
 * @param config - Engine-level model configuration used during loading.
 * @returns A promise resolving to the loaded components, typed as `TModel`
 *   (defaults to `TransformersJsModelComponents`).
 */
export declare function loadModelComponents<TModel extends TransformersJsModelComponents = TransformersJsModelComponents>(modelOpts: TransformersJsModel | TransformersJsModel & TransformersJsSpeechModel, config: TransformersJsModelConfig): Promise<TModel>;
/**
 * Loads the runtime components for a Transformers.js speech model.
 *
 * NOTE(review): declaration only — implementation not visible here.
 * Counterpart to `loadModelComponents` specialized to speech options;
 * confirm exact loading behavior in engine.ts.
 *
 * @param modelOpts - Speech-model options to load components for.
 * @param config - Engine-level model configuration used during loading.
 * @returns A promise resolving to the loaded `SpeechModelComponents`.
 */
export declare function loadSpeechModelComponents(modelOpts: TransformersJsSpeechModel, config: TransformersJsModelConfig): Promise<SpeechModelComponents>;