jorel
A unified wrapper for working with LLMs from multiple providers, including streams, images, documents & automatic tool use.
import { LogService } from "../logger";
import { InitLlmGenerationConfig, LLmGenerationStopReason, LlmMessage, LlmStreamEvent, LlmStreamProviderResponseChunkEvent, LlmStreamResponseEvent, LlmStreamToolCallEvent } from "../providers";
import { JorElGenerationConfigWithTools, JorElGenerationOutput } from "./jorel";
import { JorElModelManager } from "./jorel.models";
import { JorElProviderManager } from "./jorel.providers";
export declare class JorElCoreStore {
    defaultConfig: InitLlmGenerationConfig;
    logger: LogService;
    providerManager: JorElProviderManager;
    modelManager: JorElModelManager;
    constructor(config?: InitLlmGenerationConfig);
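    // Example (illustrative sketch; `temperature` is an assumed field of
    // InitLlmGenerationConfig, chosen to mirror the per-request config below):
    //
    //   const core = new JorElCoreStore({ temperature: 0 });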
    /**
     * Applies model-specific defaults and overrides to messages and config
     * @param messages - The messages to apply the defaults and overrides to
     * @param config - The config to apply the defaults and overrides to
     * @param modelEntry - The model entry to apply (with potential defaults)
     */
    private applyModelDefaultsAndOverrides;
    /**
     * Generate a response for a given set of messages
     * @param messages - The messages to generate a response for
     * @param config - The config to use for this generation
     * @param config.model - Model to use for this generation (optional)
     * @param config.systemMessage - System message to include in this request (optional)
     * @param config.temperature - Temperature for this request (optional)
     * @param config.tools - Tools to use for this request (optional)
     */
    generate(messages: LlmMessage[], config?: JorElGenerationConfigWithTools): Promise<JorElGenerationOutput>;
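    // Example (illustrative sketch; assumes LlmMessage accepts a
    // `{ role, content }` shape and that a default model is registered):
    //
    //   const output = await core.generate(
    //     [{ role: "user", content: "Summarize this document." }],
    //     { temperature: 0 },
    //   );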
    /**
     * Internal method to generate a response and process tool calls until a final response is generated
     * @param messages - The messages to generate a response for
     * @param config - The config to use for this generation
     * @param autoApprove - Whether to auto-approve tool calls
     */
    generateAndProcessTools(messages: LlmMessage[], config?: JorElGenerationConfigWithTools, autoApprove?: boolean): Promise<{
        output: JorElGenerationOutput;
        messages: LlmMessage[];
        stopReason: LLmGenerationStopReason;
    }>;
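    // Example (illustrative sketch; `history` and `myTools` are hypothetical
    // values, while the destructured fields come from the declared return type):
    //
    //   const { output, messages, stopReason } =
    //     await core.generateAndProcessTools(history, { tools: myTools }, true);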
    /**
     * Generate a stream of response chunks for a given set of messages
     * @param messages - The messages to generate a response for
     * @param config - The config to use for this generation
     */
    generateContentStream(messages: LlmMessage[], config?: JorElGenerationConfigWithTools): AsyncGenerator<LlmStreamProviderResponseChunkEvent | LlmStreamToolCallEvent | LlmStreamResponseEvent, void, unknown>;
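    // Example (illustrative sketch; `messages` is a hypothetical conversation,
    // and the union above suggests distinct chunk / tool-call / response events):
    //
    //   for await (const event of core.generateContentStream(messages)) {
    //     // inspect each chunk, tool call, or final response event as it arrives
    //   }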
    /**
     * Generate a stream of response chunks for a given set of messages and process tool calls until a final response is generated
     * @param messages - The messages to generate a response for
     * @param config - The config to use for this generation
     * @param autoApprove - Whether to auto-approve tool calls
     */
    generateStreamAndProcessTools(messages: LlmMessage[], config?: JorElGenerationConfigWithTools, autoApprove?: boolean): AsyncGenerator<LlmStreamEvent, void, unknown>;
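    // Example (illustrative sketch; `handleEvent` and `myTools` are hypothetical,
    // and passing `true` auto-approves tool calls per the JSDoc above):
    //
    //   for await (const event of core.generateStreamAndProcessTools(messages, { tools: myTools }, true)) {
    //     handleEvent(event);
    //   }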
    /**
     * Helper method to set a tool call to an error state
     */
    private setCallToError;
    /**
     * Helper method to create a buffered stream from content chunks
     */
    private createBufferedStream;
    /**
     * Generate an embedding for a given text
     * @param text - The text to generate an embedding for
     * @param model - The model to use for this generation (optional)
     * @param abortSignal - AbortSignal to cancel the embedding request (optional)
     */
    generateEmbedding(text: string, model?: string, abortSignal?: AbortSignal): Promise<number[]>;
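    // Example (illustrative sketch; omitting `model` presumably falls back to a
    // default from the model manager, which is an assumption here):
    //
    //   const controller = new AbortController();
    //   const vector = await core.generateEmbedding("Hello, world", undefined, controller.signal);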
}