@llumiverse/drivers
Version:
LLM driver implementations. Currently supported are: openai, huggingface, bedrock, replicate, and mistralai.
25 lines (24 loc) • 1.36 kB
TypeScript
import { AIModel, AbstractDriver, Completion, CompletionChunkObject, DriverOptions, EmbeddingsOptions, EmbeddingsResult, ExecutionOptions, PromptSegment } from "@llumiverse/core";
import { OpenAITextMessage } from "../openai/openai_format.js";
import { FetchClient } from "@vertesia/api-fetch-client";
import { ResponseFormat } from "./types.js";
/**
 * Configuration options for {@link MistralAIDriver}.
 *
 * Extends the shared `DriverOptions` from `@llumiverse/core` with
 * Mistral-specific connection settings.
 */
interface MistralAIDriverOptions extends DriverOptions {
    /** API key used to authenticate against the Mistral AI API. Required. */
    apiKey: string;
    /**
     * Optional custom endpoint URL (e.g. for a self-hosted or proxied
     * deployment). When omitted, the driver presumably falls back to the
     * default Mistral API endpoint — confirm in the implementation.
     */
    endpoint_url?: string;
}
/**
 * Driver for the Mistral AI API.
 *
 * Implements the `AbstractDriver` contract from `@llumiverse/core`, using
 * OpenAI-style chat messages (`OpenAITextMessage[]`) as its prompt format
 * and a `FetchClient` for HTTP transport.
 */
export declare class MistralAIDriver extends AbstractDriver<MistralAIDriverOptions, OpenAITextMessage[]> {
    /** Provider identifier for this driver (static registry key). */
    static PROVIDER: string;
    /** Instance-level provider name; presumably mirrors `PROVIDER` — confirm in implementation. */
    provider: string;
    /** API key taken from {@link MistralAIDriverOptions.apiKey}. */
    apiKey: string;
    /** HTTP client used for all requests to the Mistral API. */
    client: FetchClient;
    /** Custom endpoint URL, if one was supplied in the options. */
    endpointUrl?: string;
    constructor(options: MistralAIDriverOptions);
    /**
     * Maps execution options to a Mistral `ResponseFormat`, or `undefined`
     * when no explicit format should be requested.
     */
    getResponseFormat: (_options: ExecutionOptions) => ResponseFormat | undefined;
    /** Converts generic prompt segments into OpenAI-style chat messages. */
    protected formatPrompt(segments: PromptSegment[], opts: ExecutionOptions): Promise<OpenAITextMessage[]>;
    /** Executes a (non-streaming) chat completion request. */
    requestTextCompletion(messages: OpenAITextMessage[], options: ExecutionOptions): Promise<Completion>;
    /** Executes a streaming chat completion request, yielding chunks as they arrive. */
    requestTextCompletionStream(messages: OpenAITextMessage[], options: ExecutionOptions): Promise<AsyncIterable<CompletionChunkObject>>;
    /** Lists the models available from the Mistral API. */
    listModels(): Promise<AIModel<string>[]>;
    /** Checks that the configured credentials/endpoint can reach the API. */
    validateConnection(): Promise<boolean>;
    /** Computes embeddings for the given text using the given (or default) model. */
    generateEmbeddings({ text, model }: EmbeddingsOptions): Promise<EmbeddingsResult>;
}
// Marker export emitted by the compiler to ensure this file is treated as a module.
export {};