jorel
A unified wrapper for working with LLMs from multiple providers, including streams, images, documents & automatic tool use.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.JorEl = void 0;
const documents_1 = require("../documents");
const providers_1 = require("../providers");
const shared_1 = require("../shared");
const tools_1 = require("../tools");
const jorel_core_1 = require("./jorel.core");
const jorel_team_1 = require("./jorel.team");
/**
* Type guard to check if input is an array of LlmMessage objects
* @internal
*/
function isLlmMessageArray(input) {
return Array.isArray(input) && input.length > 0 && typeof input[0] === "object" && "role" in input[0];
}
/**
* Jor-El: Singular interface for managing multiple LLM providers and models
*/
class JorEl {
/**
* Create a new Jor-El instance.
*
* @param config - The configuration for the Jor-El instance.
* @param config.anthropic - Anthropic configuration (optional).
* @param config.googleGenAi - Google Generative AI configuration (optional).
* @param config.grok - Grok configuration (optional).
* @param config.groq - Groq configuration (optional).
* @param config.vertexAi - Google Vertex AI configuration (optional).
* @param config.ollama - Ollama configuration (optional).
* @param config.openAI - OpenAI configuration (optional).
* @param config.openRouter - OpenRouter configuration (optional).
* @param config.systemMessage - System message to include in all requests (optional).
* @param config.documentSystemMessage - System message to include in all requests with documents (optional).
* @param config.temperature - Default temperature for all requests (optional).
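     * @example
     * // Usage sketch (the temperature value is illustrative):
     * const jorEl = new JorEl({ openAI: true, temperature: 0.5 });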
*/
constructor(config = {}) {
/**
* Public methods for managing models
*/
this.models = {
list: () => this._core.modelManager.listModels(),
register: (params) => {
this._core.providerManager.getProvider(params.provider); // Ensure provider exists
return this._core.modelManager.registerModel(params);
},
unregister: (model) => this._core.modelManager.unregisterModel(model),
getDefault: () => this._core.modelManager.getDefaultModel(),
setDefault: (model) => this._core.modelManager.registerModel({
model,
provider: "",
setAsDefault: true,
}),
setModelSpecificDefaults: (model, defaults) => this._core.modelManager.setModelSpecificDefaults(model, defaults),
embeddings: {
register: (params) => this._core.modelManager.registerEmbeddingModel(params),
unregister: (model) => this._core.modelManager.unregisterEmbeddingModel(model),
getDefault: () => this._core.modelManager.getDefaultEmbeddingModel(),
setDefault: (model) => this._core.modelManager.setDefaultEmbeddingModel(model),
list: () => this._core.modelManager.listEmbeddingModels(),
},
};
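        // Usage sketch (the model and provider names below are illustrative, not from this file):
        // jorEl.models.register({ model: "my-model", provider: "openai" });
        // jorEl.models.setDefault("my-model");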
/**
* Public methods for managing providers
*/
this.providers = {
list: () => this._core.providerManager.listProviders(),
registerCustom: (provider, coreProvider) => this._core.providerManager.registerProvider(provider, coreProvider),
registerAnthropic: (config, withoutInitialModels) => {
const provider = new providers_1.AnthropicProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
const defaultModels = config?.bedrock ? providers_1.initialAnthropicBedrockModels : providers_1.initialAnthropicModels;
for (const model of defaultModels) {
this.models.register({ model, provider: provider.name });
}
}
},
registerGoogleGenAi: (config, withoutInitialModels) => {
const provider = new providers_1.GoogleGenerativeAIProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
for (const model of providers_1.initialGoogleGenAiModels) {
this.models.register({ model, provider: provider.name });
}
}
},
registerGrok: (config, withoutInitialModels) => {
const provider = new providers_1.GrokProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
for (const model of providers_1.initialGrokModels) {
this.models.register({ model, provider: provider.name });
}
}
},
registerGroq: (config, withoutInitialModels) => {
const provider = new providers_1.GroqProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
for (const model of providers_1.initialGroqModels) {
this.models.register({ model, provider: provider.name });
}
}
},
registerMistral: (config, withoutInitialModels) => {
const provider = new providers_1.MistralProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
for (const model of providers_1.initialMistralAiModels) {
this.models.register({ model, provider: provider.name });
}
for (const { model, dimensions } of providers_1.initialMistralAiEmbeddingModels) {
this.models.embeddings.register({ model, dimensions, provider: provider.name });
}
}
},
registerOllama: (config) => {
const provider = new providers_1.OllamaProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
},
registerOpenAi: (config, withoutInitialModels) => {
const provider = new providers_1.OpenAIProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
for (const model of providers_1.initialOpenAiModels) {
this.models.register({ model, provider: provider.name });
}
for (const { model, dimensions } of providers_1.initialOpenAiEmbeddingModels) {
this.models.embeddings.register({ model, dimensions, provider: provider.name });
}
}
},
registerOpenAiAzure: (config) => {
const provider = new providers_1.OpenAIProvider({ ...config, azure: true });
this._core.providerManager.registerProvider(provider.name, provider);
},
registerOpenRouter: (config, withoutInitialModels) => {
const provider = config?.useNativeSDK ? new providers_1.OpenRouterProviderNative(config) : new providers_1.OpenRouterProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
for (const model of providers_1.initialOpenRouterModels) {
this.models.register({ model, provider: provider.name });
}
}
},
registerGoogleVertexAi: (config, withoutInitialModels) => {
const provider = new providers_1.GoogleVertexAiProvider(config);
this._core.providerManager.registerProvider(provider.name, provider);
if (!withoutInitialModels) {
for (const model of providers_1.initialVertexAiModels) {
this.models.register({ model, provider: provider.name });
}
}
},
anthropic: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.AnthropicProvider.defaultName, setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.AnthropicProvider.defaultName).client,
},
googleGenAi: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.GoogleGenerativeAIProvider.defaultName, setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.GoogleGenerativeAIProvider.defaultName)
.client,
},
grok: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.GrokProvider.defaultName, setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.GrokProvider.defaultName).client,
},
groq: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.GroqProvider.defaultName, setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.GroqProvider.defaultName).client,
},
mistral: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.MistralProvider.defaultName, setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.MistralProvider.defaultName).client,
},
openAi: {
addModel: (model) => this.models.register({ model, provider: providers_1.OpenAIProvider.defaultName }),
getClient: () => this._core.providerManager.getProvider(providers_1.OpenAIProvider.defaultName).client,
},
openAiAzure: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.OpenAIProvider.defaultName + "-azure", setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.OpenAIProvider.defaultName + "-azure").client,
},
openRouter: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.OpenRouterProvider.defaultName, setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.OpenRouterProvider.defaultName).client,
},
vertexAi: {
addModel: (model, setAsDefault, defaults) => this.models.register({ model, provider: providers_1.GoogleVertexAiProvider.defaultName, setAsDefault, defaults }),
getClient: () => this._core.providerManager.getProvider(providers_1.GoogleVertexAiProvider.defaultName).client,
},
};
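        // Usage sketch (the config shape and key sourcing are assumptions):
        // jorEl.providers.registerOpenAi({ apiKey: process.env.OPENAI_API_KEY });
        // jorEl.providers.openAi.addModel("my-model");
        // jorEl.providers.list();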
this.systemMessage = config.systemMessage ?? "You are a helpful assistant.";
this._documentSystemMessage = config.documentSystemMessage
? this.validateDocumentSystemMessage(config.documentSystemMessage)
: "Here are some documents that you can consider in your response: {{documents}}";
this._core = new jorel_core_1.JorElCoreStore({
temperature: config.temperature === undefined ? 0 : config.temperature,
logger: config.logger,
logLevel: config.logLevel,
});
this.team = new jorel_team_1.JorElAgentManager(this._core);
if (config.anthropic)
this.providers.registerAnthropic(config.anthropic === true ? undefined : config.anthropic);
if (config.googleGenAi)
this.providers.registerGoogleGenAi(config.googleGenAi === true ? undefined : config.googleGenAi);
if (config.grok)
this.providers.registerGrok(config.grok === true ? undefined : config.grok);
if (config.groq)
this.providers.registerGroq(config.groq === true ? undefined : config.groq);
if (config.mistral)
this.providers.registerMistral(config.mistral === true ? undefined : config.mistral);
if (config.vertexAi)
this.providers.registerGoogleVertexAi(config.vertexAi === true ? undefined : config.vertexAi);
if (config.ollama)
this.providers.registerOllama(config.ollama === true ? undefined : config.ollama);
if (config.openAI)
this.providers.registerOpenAi(config.openAI === true ? undefined : config.openAI);
if (config.openAiAzure)
this.providers.registerOpenAiAzure(config.openAiAzure === true ? undefined : config.openAiAzure);
if (config.openRouter)
this.providers.registerOpenRouter(config.openRouter === true ? undefined : config.openRouter);
}
/**
* Default document system message for all requests (only used when documents are included)
*/
get documentSystemMessage() {
return this._documentSystemMessage;
}
/**
* Set the default document system message for all requests (only used when documents are included)
*/
set documentSystemMessage(documentSystemMessage) {
this._documentSystemMessage = this.validateDocumentSystemMessage(documentSystemMessage);
}
/**
* Default temperature for all requests
*/
get temperature() {
return this._core.defaultConfig.temperature;
}
/**
* Set the default temperature for all requests
*/
set temperature(temperature) {
this._core.defaultConfig.temperature = temperature;
}
/**
* Logger instance
*/
get logger() {
return this._core.logger;
}
/**
* Set the logger instance
*/
set logger(logger) {
this._core.logger = logger;
}
/**
* Log level
*/
get logLevel() {
return this._core.logger.logLevel;
}
/**
* Set the log level
*/
set logLevel(logLevel) {
this._core.logger.logLevel = logLevel;
}
/**
* Generate a response for a given set of messages.
*
* @param messages - The messages to generate a response for.
* @param config - The configuration for the generation.
     * @param config.model - Model to use for this generation (optional).
     * @param config.systemMessage - System message to include in this request (optional).
     * @param config.temperature - Temperature for this request (optional).
     * @param config.tools - Tools to use for this request (optional).
     * @remarks When providing message arrays, you can set `cacheControl` on
     * system/user messages to hint provider-specific prompt caching (e.g., Anthropic).
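     * @example
     * // Usage sketch: `messages` as produced by generateMessages().
     * const response = await jorEl.generate(messages, { temperature: 0 });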
*/
async generate(messages, config = {}) {
return this._core.generate(messages, config);
}
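    /**
     * Generate a text response for a given task or set of messages.
     *
     * @param taskOrMessages - The task (a string or an array of strings and ImageContent objects) or an array of messages.
     * @param config - Configuration for this generation, e.g. model, systemMessage, documents, messageHistory, or tools.
     * @param includeMeta - Whether to also return metadata, the full message list, and the stop reason.
     * @example
     * // Usage sketch:
     * const reply = await jorEl.text("What is the capital of France?");
     * const { response, meta } = await jorEl.text("And of Italy?", {}, true);
     */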
async text(taskOrMessages, config = {}, includeMeta = false) {
let _messages;
if (isLlmMessageArray(taskOrMessages)) {
_messages = taskOrMessages;
}
else {
const taskConfig = config;
const { systemMessage, documents, documentSystemMessage, messageHistory } = taskConfig;
_messages = await this.generateMessages(taskOrMessages, systemMessage, documents, documentSystemMessage, messageHistory);
}
const _config = {
...config,
tools: config.tools
? config.tools instanceof tools_1.LlmToolKit
? config.tools
: new tools_1.LlmToolKit(config.tools)
: undefined,
};
const { output, messages, stopReason } = await this._core.generateAndProcessTools(_messages, _config);
const response = output.content || "";
const meta = output.meta;
return includeMeta ? { response, meta, messages, stopReason } : response;
}
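    /**
     * Generate a parsed JSON response for a given task or set of messages.
     *
     * @param taskOrMessages - The task (a string or an array of strings and ImageContent objects) or an array of messages.
     * @param config - Configuration for this generation; `jsonSchema` can constrain the output shape.
     * @param includeMeta - Whether to also return metadata, the full message list, and the stop reason.
     * @param strict - Passed through to deserialization; assumed to control how invalid JSON is handled.
     * @example
     * // Usage sketch:
     * const data = await jorEl.json("Return the capital of France as JSON with keys city and country");
     */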
async json(taskOrMessages, config = {}, includeMeta = false, strict = false) {
let _messages;
let jsonSchema;
if (isLlmMessageArray(taskOrMessages)) {
_messages = taskOrMessages;
jsonSchema = config.jsonSchema || true;
}
else {
const taskConfig = config;
const { systemMessage, documents, documentSystemMessage, messageHistory } = taskConfig;
_messages = await this.generateMessages(taskOrMessages, systemMessage, documents, documentSystemMessage, messageHistory);
jsonSchema = taskConfig.jsonSchema || true;
}
const _config = {
...config,
json: jsonSchema,
tools: config.tools
? config.tools instanceof tools_1.LlmToolKit
? config.tools
: new tools_1.LlmToolKit(config.tools)
: undefined,
};
const { output, messages, stopReason } = await this._core.generateAndProcessTools(_messages, _config);
const parsed = output.content ? tools_1.LlmToolKit.deserialize(output.content, strict) : {};
return includeMeta ? { response: parsed, meta: output.meta, messages, stopReason } : parsed;
}
/**
* Generate a stream of response chunks for a given set of messages.
*
* @param messages - The messages to generate a response for.
* @param config - The configuration for the generation.
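     * @example
     * // Usage sketch; chunk shape depends on the underlying provider stream.
     * for await (const chunk of jorEl.generateContentStream(messages)) {
     *     if (chunk.type === "chunk") process.stdout.write(chunk.content);
     * }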
*/
async *generateContentStream(messages, config = {}) {
yield* this._core.generateContentStream(messages, config);
}
/**
* Generate a stream of response chunks for a given task or set of messages.
*
* @param taskOrMessages - The task to generate a response for (either a string or an array of strings and ImageContent objects) or an array of messages.
* @param config - Configuration for the specific generation.
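     * @example
     * // Usage sketch:
     * for await (const text of jorEl.stream("Tell me a short story")) {
     *     process.stdout.write(text);
     * }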
*/
async *stream(taskOrMessages, config = {}) {
const stream = this.streamWithMeta(taskOrMessages, config);
for await (const chunk of stream) {
if (chunk.type === "chunk" && chunk.content)
yield chunk.content;
}
}
/**
* Generate a stream of response chunks for a given task or set of messages with metadata.
*
* @param taskOrMessages - The task to generate a response for (either a string or an array of strings and ImageContent objects) or an array of messages.
* @param config - Configuration for the specific generation.
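     * @example
     * // Usage sketch; the event types below mirror this implementation.
     * for await (const event of jorEl.streamWithMeta("Tell me a short story")) {
     *     if (event.type === "chunk") process.stdout.write(event.content);
     *     if (event.type === "messageEnd") console.log(event.message.role);
     * }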
*/
async *streamWithMeta(taskOrMessages, config = {}) {
let messages;
if (isLlmMessageArray(taskOrMessages)) {
messages = taskOrMessages;
}
else {
const taskConfig = config;
const { systemMessage, documents, documentSystemMessage, messageHistory } = taskConfig;
messages = await this.generateMessages(taskOrMessages, systemMessage, documents, documentSystemMessage, messageHistory);
}
const _config = {
...config,
tools: config.tools
? config.tools instanceof tools_1.LlmToolKit
? config.tools
: new tools_1.LlmToolKit(config.tools)
: undefined,
};
if (config.tools) {
yield* this._core.generateStreamAndProcessTools(messages, _config);
}
else {
const stream = this._core.generateContentStream(messages, _config);
const messageId = (0, shared_1.generateUniqueId)();
yield { type: "messageStart", messageId };
for await (const chunk of stream) {
if (chunk.type === "chunk") {
yield { ...chunk, messageId };
}
else if (chunk.type === "reasoningChunk") {
yield { ...chunk, messageId };
}
if (chunk.type === "response") {
if (chunk.role === "assistant") {
const message = {
id: messageId,
role: "assistant",
content: chunk.content,
reasoningContent: chunk.reasoningContent,
createdAt: Date.now(),
};
yield { type: "messageEnd", messageId, message };
messages.push(message);
}
else {
const message = {
id: messageId,
role: "assistant_with_tools",
content: chunk.content,
toolCalls: chunk.toolCalls,
reasoningContent: chunk.reasoningContent,
meta: chunk.meta,
createdAt: Date.now(),
};
yield { type: "messageEnd", messageId, message };
messages.push(message);
}
yield chunk;
yield { type: "messages", messages, stopReason: config.abortSignal?.aborted ? "userCancelled" : "completed" };
}
}
}
}
/**
* Create an embedding for a given text.
*
* @param text - The text to create an embedding for.
* @param config - The configuration for the embedding.
* @param config.model - The model to use for the embedding (optional).
* @param config.abortSignal - AbortSignal to cancel the embedding request (optional).
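     * @example
     * // Usage sketch (assumes a default embedding model is registered):
     * const vector = await jorEl.embed("Hello, world");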
*/
async embed(text, config = {}) {
return this._core.generateEmbedding(text, config.model, config.abortSignal);
}
/**
* Process approved tool calls in messages and return updated messages.
* This method is useful when you have messages with approved tool calls that need to be executed.
*
* @param messages - The messages containing tool calls to process.
* @param config - Configuration for tool call processing.
* @param config.tools - The tools to use for processing (required).
* @param config.context - Context to pass to tool executors (optional).
* @param config.secureContext - Secure context to pass to tool executors (optional).
* @param config.maxErrors - Maximum number of tool call errors allowed (optional, defaults to 3).
* @param config.maxCalls - Maximum number of tool calls to process (optional, defaults to 5).
* @param config.abortSignal - AbortSignal to cancel tool processing (optional).
* @returns Updated messages with processed tool calls.
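     * @example
     * // Usage sketch; `myTools` stands in for your own tool definitions.
     * const updated = await jorEl.processToolCalls(messages, { tools: myTools });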
*/
async processToolCalls(messages, config) {
const tools = config.tools instanceof tools_1.LlmToolKit ? config.tools : new tools_1.LlmToolKit(config.tools);
// Find the latest message with tool calls that need processing
const messageWithToolCalls = messages
.slice()
.reverse()
.find((message) => message.role === "assistant_with_tools" &&
message.toolCalls.some((call) => call.executionState === "pending"));
if (!messageWithToolCalls) {
this.logger.debug("JorEl", "No pending tool calls found to process");
return messages;
}
this.logger.debug("JorEl", "Processing pending tool calls");
// Process the tool calls
const processedMessage = await tools.processCalls(messageWithToolCalls, {
context: config.context,
secureContext: config.secureContext,
maxErrors: config.maxErrors || 3,
maxCalls: config.maxCalls || 5,
abortSignal: config.abortSignal,
});
this.logger.debug("JorEl", "Finished processing pending tool calls");
return messages.map((msg) => {
if (msg.id === processedMessage.id) {
return processedMessage;
}
return msg;
});
}
/**
* Generate a system message - optionally with a set of documents.
*
* @param systemMessage - The system message to use.
* @param documents - The documents to include in the system message (optional).
* @param documentSystemMessage - The system message to use for documents (optional).
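     * @example
     * // Usage sketch (the document shape is an assumption):
     * const systemMessage = jorEl.generateSystemMessage("You are terse.", {
     *     documents: [{ title: "Notes", content: "..." }],
     * });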
*/
generateSystemMessage(systemMessage = "", { documents, documentSystemMessage, } = {}) {
const _documents = documents instanceof documents_1.LlmDocumentCollection ? documents : new documents_1.LlmDocumentCollection(documents);
return (0, providers_1.generateSystemMessage)(systemMessage || this.systemMessage, documentSystemMessage || this._documentSystemMessage, _documents);
}
/**
* Generate a user message.
*
* @param content - The content to include in the user message.
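     * @example
     * // Usage sketch:
     * const userMessage = await jorEl.generateUserMessage("Describe the attached image");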
*/
generateUserMessage(content) {
return (0, providers_1.generateUserMessage)(content);
}
/**
* Helper to generate messages for a given task input.
*
* @param content - The task input content (either a string or an array of strings and ImageContent objects).
* @param systemMessage - The system message to include (optional).
* @param documents - The documents to include in the system message (optional).
* @param documentSystemMessage - The system message to use for documents (optional).
     * @param messageHistory - The message history to include (optional). If a dedicated system
     * message is also provided, any system messages inside the history will be ignored.
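     * @example
     * // Usage sketch:
     * const messages = await jorEl.generateMessages("Summarize our chat", "You are terse.");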
*/
async generateMessages(content, systemMessage, documents, documentSystemMessage, messageHistory = []) {
if (Array.isArray(content)) {
if (content.length === 0) {
throw new Error("The task input must not be an empty array.");
}
}
else {
if (!content) {
throw new Error("The task input must not be empty.");
}
}
const _userMessage = await this.generateUserMessage(content);
// Empty string overrides default to skip system message
if (systemMessage !== "" && (systemMessage || this.systemMessage)) {
if (messageHistory && messageHistory.some((m) => m.role === "system")) {
this._core.logger.info("JorEl", "Message history contains system messages. These will be ignored.");
}
const _systemMessage = this.generateSystemMessage(systemMessage, { documents, documentSystemMessage });
return [_systemMessage, ...messageHistory.filter((m) => m.role !== "system"), _userMessage];
}
else {
if (documents && documents.length > 0) {
this.logger.warn("JorEl", "Documents were provided but no system message was included. The documents will not be included in the response.");
}
}
return [...messageHistory, _userMessage];
}
/**
* Helper to validate the document system message.
*
* @param documentSystemMessage - The document system message to validate.
* @internal
*/
validateDocumentSystemMessage(documentSystemMessage) {
if (!documentSystemMessage)
return documentSystemMessage;
if (documentSystemMessage.includes("{{documents}}"))
return documentSystemMessage;
throw new Error('The "documentSystemMessage" must either be empty or include the placeholder "{{documents}}" to insert the document list.');
}
}
exports.JorEl = JorEl;