inference-server
Libraries and a server for building AI applications. Adapters to various native bindings enable local inference. Integrate it into your application, or run it as a microservice.
TypeScript
import { ChatCompletionTaskArgs, ModelEngine } from '../types/index.js';
import { CustomEngine } from '../engines/index.js';
/**
 * Engine that adds vision support to chat completions by pairing an
 * image-to-text model with a chat model.
 */
export declare class ChatWithVisionEngine extends CustomEngine implements ModelEngine {
    /** Identifier of the model used to describe image inputs. */
    imageToTextModel: string;
    /** Identifier of the model used to generate chat responses. */
    chatModel: string;
    constructor({ imageToTextModel, chatModel }: {
        imageToTextModel: string;
        chatModel: string;
    });
    /** Handles a chat completion task and resolves with its result. */
    processChatCompletionTask(task: ChatCompletionTaskArgs): Promise<import("../types/index.js").ChatCompletionTaskResult>;
}
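A minimal usage sketch, not taken from the package documentation: it constructs the engine from two model identifiers and forwards a chat completion task. The import path, the model IDs, and the way the task is obtained are assumptions for illustration; the actual task shape is defined by ChatCompletionTaskArgs in the package types.

// Assumed package entry point; the class may be exported from a different path.
import { ChatWithVisionEngine } from 'inference-server';
import type { ChatCompletionTaskArgs } from 'inference-server';

async function runChatWithImage(task: ChatCompletionTaskArgs) {
    const engine = new ChatWithVisionEngine({
        // Hypothetical model identifiers; use the models registered with your server.
        imageToTextModel: 'florence-2-large',
        chatModel: 'llama-3-8b-instruct',
    });
    // Presumably the engine first describes image inputs with the image-to-text
    // model, then hands that description to the chat model to produce the reply.
    const result = await engine.processChatCompletionTask(task);
    return result;
}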