UNPKG

langchain

Version:
688 lines (687 loc) 28.1 kB
"use strict"; var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { if (k2 === undefined) k2 = k; var desc = Object.getOwnPropertyDescriptor(m, k); if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { desc = { enumerable: true, get: function() { return m[k]; } }; } Object.defineProperty(o, k2, desc); }) : (function(o, m, k, k2) { if (k2 === undefined) k2 = k; o[k2] = m[k]; })); var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { Object.defineProperty(o, "default", { enumerable: true, value: v }); }) : function(o, v) { o["default"] = v; }); var __importStar = (this && this.__importStar) || (function () { var ownKeys = function(o) { ownKeys = Object.getOwnPropertyNames || function (o) { var ar = []; for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; return ar; }; return ownKeys(o); }; return function (mod) { if (mod && mod.__esModule) return mod; var result = {}; if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); __setModuleDefault(result, mod); return result; }; })(); Object.defineProperty(exports, "__esModule", { value: true }); exports.ConfigurableModel = void 0; exports._inferModelProvider = _inferModelProvider; exports.initChatModel = initChatModel; const chat_models_1 = require("@langchain/core/language_models/chat_models"); const runnables_1 = require("@langchain/core/runnables"); const stream_1 = require("@langchain/core/utils/stream"); const _SUPPORTED_PROVIDERS = [ "openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-vertexai-web", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock", "cerebras", "deepseek", "xai", ]; async function _initChatModelHelper(model, modelProvider, // eslint-disable-next-line @typescript-eslint/no-explicit-any params = {}) { const modelProviderCopy = modelProvider || _inferModelProvider(model); if (!modelProviderCopy) { throw new Error(`Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`); } // eslint-disable-next-line @typescript-eslint/no-unused-vars const { modelProvider: _unused, ...passedParams } = params; try { switch (modelProviderCopy) { case "openai": { const { ChatOpenAI } = await Promise.resolve().then(() => __importStar(require("@langchain/openai"))); return new ChatOpenAI({ model, ...passedParams }); } case "anthropic": { const { ChatAnthropic } = await Promise.resolve().then(() => __importStar(require("@langchain/anthropic"))); return new ChatAnthropic({ model, ...passedParams }); } case "azure_openai": { const { AzureChatOpenAI } = await Promise.resolve().then(() => __importStar(require("@langchain/openai"))); return new AzureChatOpenAI({ model, ...passedParams }); } case "cohere": { const { ChatCohere } = await Promise.resolve().then(() => __importStar(require("@langchain/cohere"))); return new ChatCohere({ model, ...passedParams }); } case "google-vertexai": { const { ChatVertexAI } = await Promise.resolve().then(() => __importStar(require("@langchain/google-vertexai"))); return new ChatVertexAI({ model, ...passedParams }); } case "google-vertexai-web": { const { ChatVertexAI } = await Promise.resolve().then(() => __importStar(require("@langchain/google-vertexai-web"))); return new ChatVertexAI({ model, ...passedParams }); } case "google-genai": { const { ChatGoogleGenerativeAI } = await Promise.resolve().then(() => __importStar(require("@langchain/google-genai"))); return new ChatGoogleGenerativeAI({ model, ...passedParams }); } case "ollama": { const { ChatOllama } = await Promise.resolve().then(() => __importStar(require("@langchain/ollama"))); return new ChatOllama({ model, ...passedParams }); } case "mistralai": { const { ChatMistralAI } = await Promise.resolve().then(() => __importStar(require("@langchain/mistralai"))); return new ChatMistralAI({ model, ...passedParams }); } case "groq": { const { ChatGroq } = await Promise.resolve().then(() => __importStar(require("@langchain/groq"))); return new ChatGroq({ model, ...passedParams }); } case "cerebras": { const { ChatCerebras } = await Promise.resolve().then(() => __importStar(require("@langchain/cerebras"))); return new ChatCerebras({ model, ...passedParams }); } case "bedrock": { const { ChatBedrockConverse } = await Promise.resolve().then(() => __importStar(require("@langchain/aws"))); return new ChatBedrockConverse({ model, ...passedParams }); } case "deepseek": { const { ChatDeepSeek } = await Promise.resolve().then(() => __importStar(require("@langchain/deepseek"))); return new ChatDeepSeek({ model, ...passedParams }); } case "xai": { const { ChatXAI } = await Promise.resolve().then(() => __importStar(require("@langchain/xai"))); return new ChatXAI({ model, ...passedParams }); } case "fireworks": { // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore - Can not install as a proper dependency due to circular dependency const { ChatFireworks } = await Promise.resolve().then(() => __importStar(require( // We can not 'expect-error' because if you explicitly build `@langchain/community` // this import will be able to be resolved, thus there will be no error. However // this will never be the case in CI. // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore - Can not install as a proper dependency due to circular dependency "@langchain/community/chat_models/fireworks"))); return new ChatFireworks({ model, ...passedParams }); } case "together": { // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore - Can not install as a proper dependency due to circular dependency const { ChatTogetherAI } = await Promise.resolve().then(() => __importStar(require( // We can not 'expect-error' because if you explicitly build `@langchain/community` // this import will be able to be resolved, thus there will be no error. However // this will never be the case in CI. // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore - Can not install as a proper dependency due to circular dependency "@langchain/community/chat_models/togetherai"))); return new ChatTogetherAI({ model, ...passedParams }); } default: { const supported = _SUPPORTED_PROVIDERS.join(", "); throw new Error(`Unsupported { modelProvider: ${modelProviderCopy} }.\n\nSupported model providers are: ${supported}`); } } // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e) { if ("code" in e && e.code.includes("ERR_MODULE_NOT_FOUND")) { const attemptedPackage = new Error(e).message .split("Error: Cannot find package '")[1] .split("'")[0]; throw new Error(`Unable to import ${attemptedPackage}. Please install with ` + `\`npm install ${attemptedPackage}\` or \`yarn add ${attemptedPackage}\``); } throw e; } } /** * Attempts to infer the model provider based on the given model name. * * @param {string} modelName - The name of the model to infer the provider for. * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer. * * @example * _inferModelProvider("gpt-4"); // returns "openai" * _inferModelProvider("claude-2"); // returns "anthropic" * _inferModelProvider("unknown-model"); // returns undefined */ function _inferModelProvider(modelName) { if (modelName.startsWith("gpt-3") || modelName.startsWith("gpt-4") || modelName.startsWith("o1") || modelName.startsWith("o3") || modelName.startsWith("o4")) { return "openai"; } else if (modelName.startsWith("claude")) { return "anthropic"; } else if (modelName.startsWith("command")) { return "cohere"; } else if (modelName.startsWith("accounts/fireworks")) { return "fireworks"; } else if (modelName.startsWith("gemini")) { return "google-vertexai"; } else if (modelName.startsWith("amazon.")) { return "bedrock"; } else { return undefined; } } /** * Internal class used to create chat models. * * @internal */ class ConfigurableModel extends chat_models_1.BaseChatModel { _llmType() { return "chat_model"; } constructor(fields) { super(fields); Object.defineProperty(this, "lc_namespace", { enumerable: true, configurable: true, writable: true, value: ["langchain", "chat_models"] }); // eslint-disable-next-line @typescript-eslint/no-explicit-any Object.defineProperty(this, "_defaultConfig", { enumerable: true, configurable: true, writable: true, value: {} }); /** * @default "any" */ Object.defineProperty(this, "_configurableFields", { enumerable: true, configurable: true, writable: true, value: "any" }); /** * @default "" */ Object.defineProperty(this, "_configPrefix", { enumerable: true, configurable: true, writable: true, value: void 0 }); /** * Methods which should be called after the model is initialized. * The key will be the method name, and the value will be the arguments. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any Object.defineProperty(this, "_queuedMethodOperations", { enumerable: true, configurable: true, writable: true, value: {} }); // Extract the input types from the `BaseModel` class. Object.defineProperty(this, "withStructuredOutput", { enumerable: true, configurable: true, writable: true, value: (schema, ...args) => { this._queuedMethodOperations.withStructuredOutput = [schema, ...args]; return new ConfigurableModel({ defaultConfig: this._defaultConfig, configurableFields: this._configurableFields, configPrefix: this._configPrefix, queuedMethodOperations: this._queuedMethodOperations, }); } }); this._defaultConfig = fields.defaultConfig ?? {}; if (fields.configurableFields === "any") { this._configurableFields = "any"; } else { this._configurableFields = fields.configurableFields ?? [ "model", "modelProvider", ]; } if (fields.configPrefix) { this._configPrefix = fields.configPrefix.endsWith("_") ? fields.configPrefix : `${fields.configPrefix}_`; } else { this._configPrefix = ""; } this._queuedMethodOperations = fields.queuedMethodOperations ?? this._queuedMethodOperations; } async _model(config) { const params = { ...this._defaultConfig, ...this._modelParams(config) }; let initializedModel = await _initChatModelHelper(params.model, params.modelProvider, params); // Apply queued method operations const queuedMethodOperationsEntries = Object.entries(this._queuedMethodOperations); if (queuedMethodOperationsEntries.length > 0) { for (const [method, args] of queuedMethodOperationsEntries) { if (method in initializedModel && // eslint-disable-next-line @typescript-eslint/no-explicit-any typeof initializedModel[method] === "function") { // eslint-disable-next-line @typescript-eslint/no-explicit-any initializedModel = await initializedModel[method](...args); } } } return initializedModel; } async _generate(messages, options, runManager) { const model = await this._model(options); return model._generate(messages, options ?? {}, runManager); } bindTools(tools, // eslint-disable-next-line @typescript-eslint/no-explicit-any params) { this._queuedMethodOperations.bindTools = [tools, params]; return new ConfigurableModel({ defaultConfig: this._defaultConfig, configurableFields: this._configurableFields, configPrefix: this._configPrefix, queuedMethodOperations: this._queuedMethodOperations, }); } // eslint-disable-next-line @typescript-eslint/no-explicit-any _modelParams(config) { const configurable = config?.configurable ?? {}; // eslint-disable-next-line @typescript-eslint/no-explicit-any let modelParams = {}; for (const [key, value] of Object.entries(configurable)) { if (key.startsWith(this._configPrefix)) { const strippedKey = this._removePrefix(key, this._configPrefix); modelParams[strippedKey] = value; } } if (this._configurableFields !== "any") { modelParams = Object.fromEntries(Object.entries(modelParams).filter(([key]) => this._configurableFields.includes(key))); } return modelParams; } _removePrefix(str, prefix) { return str.startsWith(prefix) ? str.slice(prefix.length) : str; } /** * Bind config to a Runnable, returning a new Runnable. * @param {RunnableConfig | undefined} [config] - The config to bind. * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config. */ withConfig(config) { const mergedConfig = { ...(config || {}) }; const modelParams = this._modelParams(mergedConfig); const remainingConfig = Object.fromEntries(Object.entries(mergedConfig).filter(([k]) => k !== "configurable")); remainingConfig.configurable = Object.fromEntries(Object.entries(mergedConfig.configurable || {}).filter(([k]) => this._configPrefix && !Object.keys(modelParams).includes(this._removePrefix(k, this._configPrefix)))); const newConfigurableModel = new ConfigurableModel({ defaultConfig: { ...this._defaultConfig, ...modelParams }, configurableFields: Array.isArray(this._configurableFields) ? [...this._configurableFields] : this._configurableFields, configPrefix: this._configPrefix, queuedMethodOperations: this._queuedMethodOperations, }); return new runnables_1.RunnableBinding({ config: mergedConfig, bound: newConfigurableModel, }); } async invoke(input, options) { const model = await this._model(options); const config = (0, runnables_1.ensureConfig)(options); return model.invoke(input, config); } async stream(input, options) { const model = await this._model(options); const wrappedGenerator = new stream_1.AsyncGeneratorWithSetup({ generator: await model.stream(input, options), config: options, }); await wrappedGenerator.setup; return stream_1.IterableReadableStream.fromAsyncGenerator(wrappedGenerator); } async batch(inputs, options, batchOptions) { // We can super this since the base runnable implementation of // `.batch` will call `.invoke` on each input. return super.batch(inputs, options, batchOptions); } async *transform(generator, options) { const model = await this._model(options); const config = (0, runnables_1.ensureConfig)(options); yield* model.transform(generator, config); } async *streamLog(input, options, streamOptions) { const model = await this._model(options); const config = (0, runnables_1.ensureConfig)(options); yield* model.streamLog(input, config, { ...streamOptions, _schemaFormat: "original", includeNames: streamOptions?.includeNames, includeTypes: streamOptions?.includeTypes, includeTags: streamOptions?.includeTags, excludeNames: streamOptions?.excludeNames, excludeTypes: streamOptions?.excludeTypes, excludeTags: streamOptions?.excludeTags, }); } streamEvents(input, options, streamOptions) { // eslint-disable-next-line @typescript-eslint/no-this-alias const outerThis = this; async function* wrappedGenerator() { const model = await outerThis._model(options); const config = (0, runnables_1.ensureConfig)(options); const eventStream = model.streamEvents(input, config, streamOptions); for await (const chunk of eventStream) { yield chunk; } } return stream_1.IterableReadableStream.fromAsyncGenerator(wrappedGenerator()); } } exports.ConfigurableModel = ConfigurableModel; // ################################# FOR CONTRIBUTORS ################################# // // If adding support for a new provider, please append the provider // name to the supported list in the docstring below. // // #################################################################################### /** * Initialize a ChatModel from the model name and provider. * Must have the integration package corresponding to the model provider installed. * * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model. * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model. * * @param {string | ChatModelProvider} [model] - The name of the model, e.g. "gpt-4", "claude-3-opus-20240229". * Can be prefixed with the model provider, e.g. "openai:gpt-4", "anthropic:claude-3-opus-20240229". * @param {Object} [fields] - Additional configuration options. * @param {string} [fields.modelProvider] - The model provider. Supported values include: * - openai (@langchain/openai) * - anthropic (@langchain/anthropic) * - azure_openai (@langchain/openai) * - google-vertexai (@langchain/google-vertexai) * - google-vertexai-web (@langchain/google-vertexai-web) * - google-genai (@langchain/google-genai) * - bedrock (@langchain/aws) * - cohere (@langchain/cohere) * - fireworks (@langchain/community/chat_models/fireworks) * - together (@langchain/community/chat_models/togetherai) * - mistralai (@langchain/mistralai) * - groq (@langchain/groq) * - ollama (@langchain/ollama) * - cerebras (@langchain/cerebras) * - deepseek (@langchain/deepseek) * - xai (@langchain/xai) * @param {string[] | "any"} [fields.configurableFields] - Which model parameters are configurable: * - undefined: No configurable fields. * - "any": All fields are configurable. (See Security Note in description) * - string[]: Specified fields are configurable. * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime. * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor. * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel. * @throws {Error} If modelProvider cannot be inferred or isn't supported. * @throws {Error} If the model provider integration package is not installed. * * @example Initialize non-configurable models * ```typescript * import { initChatModel } from "langchain/chat_models/universal"; * * const gpt4 = await initChatModel("openai:gpt-4", { * temperature: 0.25, * }); * const gpt4Result = await gpt4.invoke("what's your name"); * * const claude = await initChatModel("anthropic:claude-3-opus-20240229", { * temperature: 0.25, * }); * const claudeResult = await claude.invoke("what's your name"); * * const gemini = await initChatModel("gemini-1.5-pro", { * modelProvider: "google-vertexai", * temperature: 0.25, * }); * const geminiResult = await gemini.invoke("what's your name"); * ``` * * @example Create a partially configurable model with no default model * ```typescript * import { initChatModel } from "langchain/chat_models/universal"; * * const configurableModel = await initChatModel(undefined, { * temperature: 0, * configurableFields: ["model", "apiKey"], * }); * * const gpt4Result = await configurableModel.invoke("what's your name", { * configurable: { * model: "gpt-4", * }, * }); * * const claudeResult = await configurableModel.invoke("what's your name", { * configurable: { * model: "claude-3-5-sonnet-20240620", * }, * }); * ``` * * @example Create a fully configurable model with a default model and a config prefix * ```typescript * import { initChatModel } from "langchain/chat_models/universal"; * * const configurableModelWithDefault = await initChatModel("gpt-4", { * modelProvider: "openai", * configurableFields: "any", * configPrefix: "foo", * temperature: 0, * }); * * const openaiResult = await configurableModelWithDefault.invoke( * "what's your name", * { * configurable: { * foo_apiKey: process.env.OPENAI_API_KEY, * }, * } * ); * * const claudeResult = await configurableModelWithDefault.invoke( * "what's your name", * { * configurable: { * foo_model: "claude-3-5-sonnet-20240620", * foo_modelProvider: "anthropic", * foo_temperature: 0.6, * foo_apiKey: process.env.ANTHROPIC_API_KEY, * }, * } * ); * ``` * * @example Bind tools to a configurable model: * ```typescript * import { initChatModel } from "langchain/chat_models/universal"; * import { z } from "zod"; * import { tool } from "@langchain/core/tools"; * * const getWeatherTool = tool( * (input) => { * // Do something with the input * return JSON.stringify(input); * }, * { * schema: z * .object({ * location: z * .string() * .describe("The city and state, e.g. San Francisco, CA"), * }) * .describe("Get the current weather in a given location"), * name: "GetWeather", * description: "Get the current weather in a given location", * } * ); * * const getPopulationTool = tool( * (input) => { * // Do something with the input * return JSON.stringify(input); * }, * { * schema: z * .object({ * location: z * .string() * .describe("The city and state, e.g. San Francisco, CA"), * }) * .describe("Get the current population in a given location"), * name: "GetPopulation", * description: "Get the current population in a given location", * } * ); * * const configurableModel = await initChatModel("gpt-4", { * configurableFields: ["model", "modelProvider", "apiKey"], * temperature: 0, * }); * * const configurableModelWithTools = configurableModel.bindTools([ * getWeatherTool, * getPopulationTool, * ]); * * const configurableToolResult = await configurableModelWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?", * { * configurable: { * apiKey: process.env.OPENAI_API_KEY, * }, * } * ); * * const configurableToolResult2 = await configurableModelWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?", * { * configurable: { * model: "claude-3-5-sonnet-20240620", * apiKey: process.env.ANTHROPIC_API_KEY, * }, * } * ); * ``` * * @description * This function initializes a ChatModel based on the provided model name and provider. * It supports various model providers and allows for runtime configuration of model parameters. * * Security Note: Setting `configurableFields` to "any" means fields like api_key, base_url, etc. * can be altered at runtime, potentially redirecting model requests to a different service/user. * Make sure that if you're accepting untrusted configurations, you enumerate the * `configurableFields` explicitly. * * The function will attempt to infer the model provider from the model name if not specified. * Certain model name prefixes are associated with specific providers: * - gpt-3... or gpt-4... -> openai * - claude... -> anthropic * - amazon.... -> bedrock * - gemini... -> google-vertexai * - command... -> cohere * - accounts/fireworks... -> fireworks * * @since 0.2.11 * @version 0.2.11 */ async function initChatModel(model, // eslint-disable-next-line @typescript-eslint/no-explicit-any fields) { // eslint-disable-next-line prefer-const let { configurableFields, configPrefix, modelProvider, ...params } = { configPrefix: "", ...(fields ?? {}), }; if (modelProvider === undefined && model?.includes(":")) { const modelComponents = model.split(":", 2); if (_SUPPORTED_PROVIDERS.includes(modelComponents[0])) { // eslint-disable-next-line no-param-reassign [modelProvider, model] = modelComponents; } } let configurableFieldsCopy = Array.isArray(configurableFields) ? [...configurableFields] : configurableFields; if (!model && configurableFieldsCopy === undefined) { configurableFieldsCopy = ["model", "modelProvider"]; } if (configPrefix && configurableFieldsCopy === undefined) { console.warn(`{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` + `{ configurableFields: [...] } to specify the model params that are ` + `configurable.`); } // eslint-disable-next-line @typescript-eslint/no-explicit-any const paramsCopy = { ...params }; if (configurableFieldsCopy === undefined) { return new ConfigurableModel({ defaultConfig: { ...paramsCopy, model, modelProvider, }, configPrefix, }); } else { if (model) { paramsCopy.model = model; } if (modelProvider) { paramsCopy.modelProvider = modelProvider; } return new ConfigurableModel({ defaultConfig: paramsCopy, configPrefix, configurableFields: configurableFieldsCopy, }); } }