jorel
Version:
A unified wrapper for working with LLMs from multiple providers, including streams, images, documents & automatic tool use.
28 lines (27 loc) • 1.29 kB
TypeScript
import { Mistral } from "@mistralai/mistralai";
import { LlmCoreProvider, LlmGenerationConfig, LlmMessage, LlmResponse, LlmStreamProviderResponseChunkEvent, LlmStreamResponseEvent } from "../../providers";
/** Configuration options for {@link MistralProvider}. */
export interface MistralConfig {
    /** Mistral API key — presumably falls back to an environment variable when omitted; verify against the implementation. */
    apiKey?: string;
    /** Retry behavior for failed requests; only a "backoff" strategy is supported. */
    retryConfig?: {
        strategy: "backoff";
        /** Exponential-backoff tuning parameters. NOTE(review): units look like milliseconds — confirm against the Mistral SDK docs. */
        backoff?: {
            initialInterval: number;
            maxInterval: number;
            exponent: number;
            maxElapsedTime: number;
        };
        /** Whether connection-level errors (as opposed to HTTP errors) are also retried. */
        retryConnectionErrors?: boolean;
    };
    /** Request timeout — presumably in milliseconds; TODO confirm. */
    timeout?: number;
}
/** Provides access to the Mistral AI API (via the official `@mistralai/mistralai` SDK). */
export declare class MistralProvider implements LlmCoreProvider {
    /** Provider display name; defaults to {@link MistralProvider.defaultName}. */
    readonly name: string;
    static readonly defaultName = "mistral";
    /** Underlying Mistral SDK client used for all requests. */
    readonly client: Mistral;
    constructor({ apiKey, retryConfig, timeout }?: MistralConfig);
    /** Generates a single complete response for the given model and message history. */
    generateResponse(model: string, messages: LlmMessage[], config?: LlmGenerationConfig): Promise<LlmResponse>;
    /** Streams the response as chunk events, ending with a final response event. */
    generateResponseStream(model: string, messages: LlmMessage[], config?: LlmGenerationConfig): AsyncGenerator<LlmStreamProviderResponseChunkEvent | LlmStreamResponseEvent, void, unknown>;
    /** Lists model identifiers available to the configured account. */
    getAvailableModels(): Promise<string[]>;
    /** Creates an embedding vector for `text` using the given embedding model. */
    createEmbedding(model: string, text: string, abortSignal?: AbortSignal): Promise<number[]>;
}