jorel
A unified wrapper for working with LLMs from multiple providers, including streams, images, documents & automatic tool use.
import { CreateLlmDocument, LlmDocument, LlmDocumentCollection } from "../documents";
import { LoggerOption, LogLevel, LogService } from "../logger";
import { ImageContent } from "../media";
import { AnthropicConfig, GoogleGenerativeAIConfig, GoogleVertexAiConfig, GroqConfig, JsonSpecification, LlmAssistantMessage, LlmAssistantMessageMeta, LlmAssistantMessageWithToolCalls, LlmCoreProvider, LlmJsonResponseWithMeta, LlmMessage, LlmStreamEvent, LlmTextResponseWithMeta, LlmToolChoice, MistralConfig, OllamaConfig, OpenAiAzureConfig, OpenAIConfig, OpenRouterConfig, ReasoningEffort, StreamBufferConfig, Verbosity } from "../providers";
import { Nullable } from "../shared";
import { LlmTool, LlmToolConfiguration, LLmToolContextSegment, LlmToolKit } from "../tools";
import { ModelSpecificDefaults } from "./jorel.models";
import { JorElAgentManager } from "./jorel.team";
interface InitialConfig {
anthropic?: AnthropicConfig | true;
googleGenAi?: GoogleGenerativeAIConfig | true;
grok?: OpenAIConfig | true;
groq?: GroqConfig | true;
mistral?: MistralConfig | true;
ollama?: OllamaConfig | true;
openAI?: OpenAIConfig | true;
openAiAzure?: Omit<OpenAiAzureConfig, "azure"> | true;
openRouter?: OpenRouterConfig | true;
vertexAi?: GoogleVertexAiConfig | true;
systemMessage?: Nullable<string>;
documentSystemMessage?: string;
temperature?: Nullable<number>;
logger?: LoggerOption | LogService;
logLevel?: LogLevel;
}
export interface JorElCoreGenerationConfig {
temperature?: Nullable<number>;
maxTokens?: number;
/** Reasoning effort for the model - only supported by some providers & models (currently mainly OpenAI) */
reasoningEffort?: ReasoningEffort;
/** Verbosity for the model - only supported by some providers & models (currently only OpenAI) */
verbosity?: Verbosity;
/** Stream buffering configuration for controlling chunk emission rate */
streamBuffer?: StreamBufferConfig;
/** AbortSignal to cancel the generation request */
abortSignal?: AbortSignal;
}
export interface JorElTextGenerationConfigWithTools extends JorElCoreGenerationConfig {
model?: string;
systemMessage?: string;
documentSystemMessage?: string;
documents?: (LlmDocument | CreateLlmDocument)[] | LlmDocumentCollection;
tools?: LlmToolKit | (LlmTool | LlmToolConfiguration)[];
toolChoice?: LlmToolChoice;
maxToolCalls?: number;
maxToolCallErrors?: number;
context?: LLmToolContextSegment;
secureContext?: LLmToolContextSegment;
messageHistory?: LlmMessage[];
json?: boolean | JsonSpecification;
jsonDescription?: string;
}
export interface JorElJsonGenerationConfigWithTools extends Omit<JorElTextGenerationConfigWithTools, "json"> {
jsonSchema?: JsonSpecification;
}
export interface JorElMessagesGenerationConfig extends JorElCoreGenerationConfig {
model?: string;
tools?: LlmToolKit | (LlmTool | LlmToolConfiguration)[];
toolChoice?: LlmToolChoice;
maxToolCalls?: number;
maxToolCallErrors?: number;
context?: LLmToolContextSegment;
secureContext?: LLmToolContextSegment;
json?: boolean | JsonSpecification;
jsonDescription?: string;
}
export interface JorElMessagesJsonGenerationConfig extends Omit<JorElMessagesGenerationConfig, "json"> {
jsonSchema?: JsonSpecification;
}
export interface JorElGenerationConfigWithTools extends JorElCoreGenerationConfig {
model?: string;
tools?: LlmToolKit;
toolChoice?: LlmToolChoice;
maxToolCalls?: number;
maxToolCallErrors?: number;
context?: LLmToolContextSegment;
secureContext?: LLmToolContextSegment;
json?: boolean | JsonSpecification;
jsonDescription?: string;
}
export type JorElTaskInput = string | (string | ImageContent)[];
export type JorElGenerationOutput = (LlmAssistantMessage | LlmAssistantMessageWithToolCalls) & {
meta: LlmAssistantMessageMeta;
};
/**
* Jor-El: Singular interface for managing multiple LLM providers and models
*/
export declare class JorEl {
/**
* System message used for all requests by default (unless specified per request)
*/
systemMessage: string;
/**
* Agent related functionality
*/
readonly team: JorElAgentManager;
/**
* Public methods for managing models
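*
* @example A sketch; the model and provider names below are illustrative assumptions:
* ```ts
* jorEl.models.register({ model: "gpt-4o-mini", provider: "openai", setAsDefault: true });
* console.log(jorEl.models.getDefault()); // "gpt-4o-mini"
* ```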
*/
readonly models: {
list: () => import("./jorel.models").ModelEntry[];
register: (params: {
model: string;
provider: string;
setAsDefault?: boolean;
defaults?: ModelSpecificDefaults;
}) => void;
unregister: (model: string) => void;
getDefault: () => string;
setDefault: (model: string) => void;
setModelSpecificDefaults: (model: string, defaults: ModelSpecificDefaults) => void;
embeddings: {
register: (params: {
model: string;
provider: string;
dimensions: number;
setAsDefault?: boolean;
}) => void;
unregister: (model: string) => void;
getDefault: () => string;
setDefault: (model: string) => void;
list: () => {
model: string;
provider: string;
}[];
};
};
/**
* Public methods for managing providers
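*
* @example A sketch; assumes registerAnthropic falls back to environment credentials when called without a config, and the model id is illustrative:
* ```ts
* jorEl.providers.registerAnthropic();
* jorEl.providers.anthropic.addModel("claude-3-5-sonnet-latest", true);
* ```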
*/
readonly providers: {
list: () => string[];
registerCustom: (provider: string, coreProvider: LlmCoreProvider) => void;
registerAnthropic: (config?: AnthropicConfig, withoutInitialModels?: boolean) => void;
registerGoogleGenAi: (config?: GoogleGenerativeAIConfig, withoutInitialModels?: boolean) => void;
registerGrok: (config?: OpenAIConfig, withoutInitialModels?: boolean) => void;
registerGroq: (config?: GroqConfig, withoutInitialModels?: boolean) => void;
registerMistral: (config?: MistralConfig, withoutInitialModels?: boolean) => void;
registerOllama: (config?: OllamaConfig) => void;
registerOpenAi: (config?: OpenAIConfig, withoutInitialModels?: boolean) => void;
registerOpenAiAzure: (config?: Omit<OpenAiAzureConfig, "azure">) => void;
registerOpenRouter: (config?: OpenRouterConfig, withoutInitialModels?: boolean) => void;
registerGoogleVertexAi: (config?: GoogleVertexAiConfig, withoutInitialModels?: boolean) => void;
anthropic: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("@anthropic-ai/sdk").default | import("@anthropic-ai/bedrock-sdk").default;
};
googleGenAi: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("@google/genai").GoogleGenAI;
};
grok: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("openai").default | import("openai").AzureOpenAI;
};
groq: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("openai").default | import("openai").AzureOpenAI;
};
mistral: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("@mistralai/mistralai").Mistral;
};
openAi: {
addModel: (model: string) => void;
getClient: () => import("openai").default | import("openai").AzureOpenAI;
};
openAiAzure: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("openai").default | import("openai").AzureOpenAI;
};
openRouter: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("openai").default | import("openai").AzureOpenAI;
};
vertexAi: {
addModel: (model: string, setAsDefault?: boolean, defaults?: ModelSpecificDefaults) => void;
getClient: () => import("@google-cloud/vertexai").VertexAI;
};
};
/**
* Create a new Jor-El instance.
*
* @param config - The configuration for the Jor-El instance.
* @param config.anthropic - Anthropic configuration (optional).
* @param config.googleGenAi - Google Generative AI configuration (optional).
* @param config.grok - Grok configuration (optional).
* @param config.groq - Groq configuration (optional).
* @param config.mistral - Mistral configuration (optional).
* @param config.ollama - Ollama configuration (optional).
* @param config.openAI - OpenAI configuration (optional).
* @param config.openAiAzure - Azure OpenAI configuration (optional).
* @param config.openRouter - OpenRouter configuration (optional).
* @param config.vertexAi - Google Vertex AI configuration (optional).
* @param config.systemMessage - System message to include in all requests (optional).
* @param config.documentSystemMessage - System message to include in all requests with documents (optional).
* @param config.temperature - Default temperature for all requests (optional).
* @param config.logger - Logger instance or logger option to use (optional).
* @param config.logLevel - Log level for the logger (optional).
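*
* @example A minimal sketch; passing `true` is assumed to fall back to environment credentials (e.g. OPENAI_API_KEY):
* ```ts
* const jorEl = new JorEl({
*   openAI: true,
*   systemMessage: "You are a concise assistant.",
*   temperature: 0,
* });
* ```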
*/
constructor(config?: InitialConfig);
/**
* Default document system message for all requests (only used when documents are included)
*/
get documentSystemMessage(): string;
/**
* Set the default document system message for all requests (only used when documents are included)
*/
set documentSystemMessage(documentSystemMessage: string);
/**
* Default temperature for all requests
*/
get temperature(): Nullable<number> | undefined;
/**
* Set the default temperature for all requests
*/
set temperature(temperature: Nullable<number>);
/**
* Logger instance
*/
get logger(): LogService;
/**
* Set the logger instance
*/
set logger(logger: LogService);
/**
* Log level
*/
get logLevel(): LogLevel;
/**
* Set the log level
*/
set logLevel(logLevel: LogLevel);
/**
* Generate a response for a given set of messages.
*
* @remarks When providing message arrays, you can set `cacheControl` on
* system/user messages to hint provider-specific prompt caching (e.g., Anthropic).
*
* @param messages - The messages to generate a response for.
* @param config - The configuration for the generation.
* @param config.model - Model to use for this generation (optional).
* @param config.systemMessage - System message to include in this request (optional).
* @param config.temperature - Temperature for this request (optional).
* @param config.tools - Tools to use for this request (optional).
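*
* @example A sketch, building messages with the generateMessages helper declared below:
* ```ts
* const messages = await jorEl.generateMessages("What is 2 + 2?");
* const output = await jorEl.generate(messages);
* console.log(output.meta); // LlmAssistantMessageMeta
* ```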
*/
generate(messages: LlmMessage[], config?: JorElGenerationConfigWithTools): Promise<JorElGenerationOutput>;
/**
* Generate a response for a given task.
*
* @param task - The task to generate a response for (either a string or an array of strings and ImageContent objects).
* @param config - Configuration for the specific generation.
* @param includeMeta - Whether to include the metadata and all previous messages in the response.
* @returns The text response, or an object with the response, metadata, and messages.
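*
* @example A minimal sketch:
* ```ts
* const answer = await jorEl.text("What is the capital of France?");
* // Pass includeMeta = true to receive an LlmTextResponseWithMeta instead of a plain string
* const withMeta = await jorEl.text("What is the capital of France?", undefined, true);
* ```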
*/
text(task: JorElTaskInput, config?: JorElTextGenerationConfigWithTools, includeMeta?: false): Promise<string>;
text(task: JorElTaskInput, config?: JorElTextGenerationConfigWithTools, includeMeta?: true): Promise<LlmTextResponseWithMeta>;
/**
* Generate a response for a given set of messages.
*
* @param messages - The messages to generate a response for.
* @param config - Configuration for the specific generation.
* @param includeMeta - Whether to include the metadata and all previous messages in the response.
* @returns The text response, or an object with the response, metadata, and messages.
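*
* @example A sketch; the role/content shape shown is an assumption about LlmMessage:
* ```ts
* const reply = await jorEl.text(
*   [
*     { role: "system", content: "You are terse." },
*     { role: "user", content: "Name one prime number." },
*   ] as LlmMessage[],
*   { temperature: 0 },
* );
* ```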
*/
text(messages: LlmMessage[], config?: JorElMessagesGenerationConfig, includeMeta?: false): Promise<string>;
text(messages: LlmMessage[], config?: JorElMessagesGenerationConfig, includeMeta?: true): Promise<LlmTextResponseWithMeta>;
/**
* Generate a JSON response for a given task.
*
* @param task - The task to generate a response for (either a string or an array of strings and ImageContent objects).
* @param config - Configuration for the specific generation.
* @param includeMeta - Whether to include the metadata and all previous messages in the response.
* @returns The JSON response, or an object with the response, metadata, and messages.
* @throws Error - If the response is not valid JSON.
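*
* @example A sketch; the schema literal assumes JsonSpecification accepts a JSON-Schema-like object:
* ```ts
* const person = await jorEl.json("Generate a fictional person", {
*   jsonSchema: {
*     type: "object",
*     properties: { name: { type: "string" }, age: { type: "number" } },
*   },
* });
* ```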
*/
json(task: JorElTaskInput, config?: JorElJsonGenerationConfigWithTools, includeMeta?: false): Promise<object>;
json(task: JorElTaskInput, config?: JorElJsonGenerationConfigWithTools, includeMeta?: true): Promise<LlmJsonResponseWithMeta>;
/**
* Generate a JSON response for a given set of messages.
*
* @param messages - The messages to generate a response for.
* @param config - Configuration for the specific generation.
* @param includeMeta - Whether to include the metadata and all previous messages in the response.
* @param strict - Whether to parse the response strictly as JSON (default: false).
* If true, the response is parsed directly with JSON.parse.
* If false, responses wrapped in a ```json ... ``` fence are also accepted.
* @returns The JSON response, or an object with the response, metadata, and messages.
* @throws Error - If the response is not valid JSON.
*/
json(messages: LlmMessage[], config?: JorElMessagesJsonGenerationConfig, includeMeta?: false, strict?: boolean): Promise<object>;
json(messages: LlmMessage[], config?: JorElMessagesJsonGenerationConfig, includeMeta?: true, strict?: boolean): Promise<LlmJsonResponseWithMeta>;
/**
* Generate a stream of response chunks for a given set of messages.
*
* @param messages - The messages to generate a response for.
* @param config - The configuration for the generation.
*/
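*
* @example A sketch; events are logged as-is, since the concrete event union is declared in ../providers:
* ```ts
* const messages = await jorEl.generateMessages("Stream a haiku");
* for await (const event of jorEl.generateContentStream(messages)) {
*   console.log(event);
* }
* ```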
generateContentStream(messages: LlmMessage[], config?: JorElGenerationConfigWithTools): AsyncGenerator<import("../providers").LlmStreamProviderResponseChunkEvent | import("../providers").LlmStreamResponseEvent | import("../providers").LlmStreamToolCallEvent, void, unknown>;
/**
* Generate a stream of response chunks for a given task or set of messages.
*
* @param taskOrMessages - The task to generate a response for (either a string or an array of strings and ImageContent objects) or an array of messages.
* @param config - Configuration for the specific generation.
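*
* @example A minimal sketch:
* ```ts
* for await (const chunk of jorEl.stream("Tell me a short story")) {
*   process.stdout.write(chunk);
* }
* ```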
*/
stream(taskOrMessages: JorElTaskInput | LlmMessage[], config?: JorElTextGenerationConfigWithTools | JorElMessagesGenerationConfig): AsyncGenerator<string, void, unknown>;
/**
* Generate a stream of response chunks for a given task or set of messages with metadata.
*
* @param taskOrMessages - The task to generate a response for (either a string or an array of strings and ImageContent objects) or an array of messages.
* @param config - Configuration for the specific generation.
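*
* @example A sketch; see LlmStreamEvent for the concrete event variants:
* ```ts
* for await (const event of jorEl.streamWithMeta("Summarize the plot of Hamlet")) {
*   console.log(event);
* }
* ```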
*/
streamWithMeta(taskOrMessages: JorElTaskInput | LlmMessage[], config?: JorElTextGenerationConfigWithTools | JorElMessagesGenerationConfig): AsyncGenerator<LlmStreamEvent, void, unknown>;
/**
* Create an embedding for a given text.
*
* @param text - The text to create an embedding for.
* @param config - The configuration for the embedding.
* @param config.model - The model to use for the embedding (optional).
* @param config.abortSignal - AbortSignal to cancel the embedding request (optional).
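*
* @example A minimal sketch; assumes an embedding model has been registered and set as default:
* ```ts
* const vector = await jorEl.embed("Hello, world"); // number[]
* ```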
*/
embed(text: string, config?: {
model?: string;
abortSignal?: AbortSignal;
}): Promise<number[]>;
/**
* Process approved tool calls in messages and return updated messages.
* This method is useful when you have messages with approved tool calls that need to be executed.
*
* @param messages - The messages containing tool calls to process.
* @param config - Configuration for tool call processing.
* @param config.tools - The tools to use for processing (required).
* @param config.context - Context to pass to tool executors (optional).
* @param config.secureContext - Secure context to pass to tool executors (optional).
* @param config.maxErrors - Maximum number of tool call errors allowed (optional, defaults to 3).
* @param config.maxCalls - Maximum number of tool calls to process (optional, defaults to 5).
* @param config.abortSignal - AbortSignal to cancel tool processing (optional).
* @returns Updated messages with processed tool calls.
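*
* @example A sketch; the tool fields shown (name, description, executor) are hypothetical, not confirmed by LlmToolConfiguration:
* ```ts
* // pendingMessages: LlmMessage[] that already contain approved tool calls (hypothetical variable)
* const updated = await jorEl.processToolCalls(pendingMessages, {
*   tools: [{ name: "get_weather", description: "Look up the weather", executor: async () => ({ tempC: 21 }) }],
*   maxCalls: 5,
* });
* ```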
*/
processToolCalls(messages: LlmMessage[], config: {
tools: LlmToolKit | (LlmTool | LlmToolConfiguration)[];
context?: LLmToolContextSegment;
secureContext?: LLmToolContextSegment;
maxErrors?: number;
maxCalls?: number;
abortSignal?: AbortSignal;
}): Promise<LlmMessage[]>;
/**
* Generate a system message - optionally with a set of documents.
*
* @param systemMessage - The system message to use.
* @param documents - The documents to include in the system message (optional).
* @param documentSystemMessage - The system message to use for documents (optional).
*/
generateSystemMessage(systemMessage?: string, { documents, documentSystemMessage, }?: {
documents?: (LlmDocument | CreateLlmDocument)[] | LlmDocumentCollection;
documentSystemMessage?: string;
}): import("../providers").LlmSystemMessage;
/**
* Generate a user message.
*
* @param content - The content to include in the user message.
*/
generateUserMessage(content: JorElTaskInput): Promise<import("../providers").LlmUserMessage>;
/**
* Helper to generate messages for a given task input.
*
* @param content - The task input content (either a string or an array of strings and ImageContent objects).
* @param systemMessage - The system message to include (optional).
* @param documents - The documents to include in the system message (optional).
* @param documentSystemMessage - The system message to use for documents (optional).
* @param messageHistory - The message history to include (optional). If provided along with a dedicated
* system message, any system message inside the message history will be ignored.
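*
* @example A minimal sketch:
* ```ts
* const messages = await jorEl.generateMessages("Hello there", "You are a pirate.");
* ```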
*/
generateMessages(content: JorElTaskInput, systemMessage?: string, documents?: (LlmDocument | CreateLlmDocument)[] | LlmDocumentCollection, documentSystemMessage?: string, messageHistory?: LlmMessage[]): Promise<LlmMessage[]>;
}
export {};