inference-server
Libraries and a server for building AI applications, with adapters to various native bindings for local inference. Integrate it into your application, or run it as a standalone microservice.
JavaScript
export const BuiltInEngines = {
	gpt4all: 'gpt4all',
	nodeLlamaCpp: 'node-llama-cpp',
	transformersJs: 'transformers-js',
	stableDiffusionCpp: 'stable-diffusion-cpp',
};

export const builtInEngineNames = [
	...Object.values(BuiltInEngines),
];

export class CustomEngine {
	pool;
	store;
	// Called when the engine is started; stores references to the
	// instance pool and model store passed in.
	async start({ pool, store }) {
		this.pool = pool;
		this.store = store;
	}
	// Lifecycle hooks, left empty here for subclasses to override.
	async prepareModel() { }
	async createInstance() { }
	async disposeInstance() { }
}
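As a minimal sketch of how this extension point might be used, a custom engine subclasses CustomEngine and overrides the lifecycle hooks. The import path and the hook bodies below are assumptions based only on the class shown above, not the package's documented API.

JavaScript

import { CustomEngine } from 'inference-server'; // assumed import path

// Illustrative no-op engine that logs each lifecycle stage.
class LoggingEngine extends CustomEngine {
	async prepareModel() {
		// Assumption: downloading/verifying model assets would happen here.
		console.log('prepareModel called');
	}
	async createInstance() {
		// Assumption: an inference session would be allocated here.
		console.log('createInstance called');
	}
	async disposeInstance() {
		// Assumption: session resources would be released here.
		console.log('disposeInstance called');
	}
}

export default LoggingEngine;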