@lewist9x/distil
Version: (not captured in this extract)
An opinionated library for managing LLM pipelines. Define, track, rate, and curate prompt–completion pairs for fine-tuning.
10 lines (9 loc) • 319 B
TypeScript
import { LLMInput, InferenceResult } from "./types";
/**
 * Runs a single LLM inference call and returns the tracked result.
 *
 * This is a type declaration only — the implementation lives in the
 * compiled module. Field purposes below are inferred from names and
 * should be confirmed against the implementation.
 */
export declare class InferenceEngine {
    // NOTE(review): presumably an Elasticsearch client used for logging/
    // tracking prompt–completion pairs — confirm in implementation.
    private esClient;
    // Internal logger; verbosity is set via the constructor's logLevel.
    private logger;
    // NOTE(review): presumably an OpenAI API client — confirm.
    private openaiClient;
    // NOTE(review): presumably a client for an OpenLLM-served model — confirm.
    private openLLMClient;
    /**
     * @param logLevel - Minimum severity emitted by the internal logger.
     *                   Optional; the implementation's default is not
     *                   visible from this declaration.
     */
    constructor(logLevel?: "DEBUG" | "INFO" | "WARNING" | "ERROR");
    /**
     * Performs one inference call for the given input.
     *
     * @param input - The prompt/model parameters to run (see `LLMInput`).
     * @returns A promise resolving to the completion and any tracking
     *          metadata (see `InferenceResult`).
     */
    callInference(input: LLMInput): Promise<InferenceResult>;
}