inference-server
Version: (unspecified)
Libraries and a server for building AI applications, with adapters to various native bindings that allow local inference. Integrate it with your application, or run it as a microservice.
6 lines (5 loc) • 479 B
TypeScript
import http from 'node:http';
import { InferenceServer } from './server.js';
/**
 * Creates an Express router bound to the given {@link InferenceServer}.
 * NOTE(review): by its name this presumably exposes OpenAI-compatible API
 * routes (e.g. chat/completions) — confirm against the implementation;
 * only the signature is visible here.
 *
 * @param inferenceServer - The inference server instance the routes delegate to.
 * @returns An Express `Router` suitable for mounting via `app.use(...)`.
 */
export declare function createOpenAIMiddleware(inferenceServer: InferenceServer): import("express-serve-static-core").Router;
/**
 * Creates an Express router wrapping the given {@link InferenceServer}.
 * NOTE(review): how this differs from `createOpenAIMiddleware` (e.g. the
 * server's native, non-OpenAI route scheme) is not visible from this
 * declaration file — verify against the implementation.
 *
 * @param inferenceServer - The inference server instance the routes delegate to.
 * @returns An Express `Router` suitable for mounting via `app.use(...)`.
 */
export declare function createExpressMiddleware(inferenceServer: InferenceServer): import("express-serve-static-core").Router;
/**
 * Creates a standalone Node `http.Server` hosting the given
 * {@link InferenceServer}. The return type shows it is a plain
 * `node:http` server; whether it is already listening or must be
 * started with `.listen(...)` is not visible here — confirm in the
 * implementation before relying on either behavior.
 *
 * @param inferenceServer - The inference server instance to serve over HTTP.
 * @returns A Node `http.Server` instance.
 */
export declare function createExpressServer(inferenceServer: InferenceServer): http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>;