inference-server
Version:
Libraries and server to build AI applications. Adapters to various native bindings allowing local inference. Integrate it with your application, or use it as a microservice.
21 lines (20 loc) • 682 B
TypeScript
import { TransformersJsModelConfig } from './engine.js';
/**
 * Per-component validation error messages for one model entry.
 * Each optional field carries a human-readable error for that component;
 * an absent field means no error was recorded for it.
 */
interface ComponentValidationErrors {
/** Error for the model weights/files themselves. */
model?: string;
/** Error for the tokenizer files. */
tokenizer?: string;
/** Error for the processor files (e.g. image/audio preprocessing). */
processor?: string;
/** Error for the vocoder files. */
vocoder?: string;
}
/**
 * Validation errors grouped by model role within a single configuration.
 * NOTE(review): field names suggest these mirror sub-models of a
 * TransformersJsModelConfig (primary/text/vision/speech/vocoder) —
 * confirm against the engine's config shape.
 */
interface ModelValidationErrors {
primaryModel?: ComponentValidationErrors;
textModel?: ComponentValidationErrors;
visionModel?: ComponentValidationErrors;
speechModel?: ComponentValidationErrors;
vocoderModel?: ComponentValidationErrors;
}
/**
 * Result of validating a model configuration's files.
 */
export interface ModelValidationResult {
/** Human-readable summary of the validation outcome. */
message: string;
/** Detailed per-model errors; absent when no detailed breakdown applies. */
errors?: ModelValidationErrors;
}
/**
 * Validates the files referenced by a Transformers.js model configuration.
 *
 * @param config - Model configuration whose component files should be checked.
 * @returns A promise resolving to a {@link ModelValidationResult}, or
 *   `undefined`. NOTE(review): presumably `undefined` means validation
 *   passed with nothing to report — confirm in the implementation.
 */
export declare function validateModelFiles(config: TransformersJsModelConfig): Promise<ModelValidationResult | undefined>;
export {};