inference-server
Libraries and a server for building AI applications. Adapters to various native bindings allow local inference. Integrate it with your application, or run it as a microservice.
TypeScript
import type { ModelPool } from '../pool.js';
import type { ModelStore } from '../store.js';
import type { ModelEngine, EngineStartContext } from '../types/index.js';
/** Canonical names of the inference engines bundled with the package. */
export declare const BuiltInEngines: {
    readonly gpt4all: "gpt4all";
    readonly nodeLlamaCpp: "node-llama-cpp";
    readonly transformersJs: "transformers-js";
    readonly stableDiffusionCpp: "stable-diffusion-cpp";
};
/** Union of the built-in engine name strings. */
export type BuiltInEngineName = typeof BuiltInEngines[keyof typeof BuiltInEngines];
export declare const builtInEngineNames: string[];
/** Base class for user-defined engines; start() receives the shared model pool and store. */
export declare class CustomEngine implements ModelEngine {
    pool: ModelPool;
    store: ModelStore;
    start({ pool, store }: EngineStartContext): Promise<void>;
    prepareModel(): Promise<void>;
    createInstance(): Promise<void>;
    disposeInstance(): Promise<void>;
}
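A minimal usage sketch, not taken from the package docs: it assumes CustomEngine, BuiltInEngines, and BuiltInEngineName are exported from the package's root entry point, and the EchoEngine class and all method bodies are illustrative assumptions rather than library API.

import { BuiltInEngines, CustomEngine, type BuiltInEngineName } from 'inference-server'; // assumed export path

// Pick a built-in engine by its canonical name.
const engineName: BuiltInEngineName = BuiltInEngines.nodeLlamaCpp; // "node-llama-cpp"
console.log(engineName);

// A do-nothing custom engine. CustomEngine's start() already wires up the
// pool and store, so a subclass only overrides the lifecycle hooks it needs.
// (All method bodies below are illustrative assumptions.)
class EchoEngine extends CustomEngine {
    async prepareModel(): Promise<void> {
        // e.g. verify model weights exist on disk before instances are created
    }
    async createInstance(): Promise<void> {
        // e.g. allocate a context in the underlying native binding
    }
    async disposeInstance(): Promise<void> {
        // e.g. free the native binding context
    }
}

Because the base class stores pool and store on start(), overridden hooks can reach them as this.pool and this.store; the three hooks mirror the lifecycle implied by the declaration: prepare the model once, then create and dispose instances as the pool demands.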