inference-server
Version:
Libraries and a server for building AI applications. Includes adapters to various native bindings, enabling local inference. Integrate it with your application, or run it as a microservice.
8 lines (7 loc) • 582 B
TypeScript
import type { InferenceServer } from '../../server.js';
/**
 * A Node `http` route handler: consumes the incoming request, writes the
 * response itself, and resolves once the response has been handled.
 */
type OpenAIRouteHandler = (req: import("http").IncomingMessage, res: import("http").ServerResponse) => Promise<void>;
/**
 * Creates the set of OpenAI-compatible HTTP request handlers backed by the
 * given inference server.
 *
 * @param inferenceServer - Server instance the handlers delegate inference to.
 * @returns Handlers for the OpenAI-style endpoints: chat completions,
 *          completions, model listing, and embeddings.
 */
export declare function createOpenAIRequestHandlers(inferenceServer: InferenceServer): {
    chatCompletions: OpenAIRouteHandler;
    completions: OpenAIRouteHandler;
    models: OpenAIRouteHandler;
    embeddings: OpenAIRouteHandler;
};