"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.AzureChatOpenAI = void 0; const openai_1 = require("openai"); const env_1 = require("@langchain/core/utils/env"); const chat_models_js_1 = require("../chat_models.cjs"); const azure_js_1 = require("../utils/azure.cjs"); /** * Azure OpenAI chat model integration. * * Setup: * Install `@langchain/openai` and set the following environment variables: * * ```bash * npm install @langchain/openai * export AZURE_OPENAI_API_KEY="your-api-key" * export AZURE_OPENAI_API_DEPLOYMENT_NAME="your-deployment-name" * export AZURE_OPENAI_API_VERSION="your-version" * export AZURE_OPENAI_BASE_PATH="your-base-path" * ``` * * ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html#constructor) * * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html) * * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc. * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below: * * ```typescript * // When calling `.bind`, call options should be passed via the first argument * const llmWithArgsBound = llm.bind({ * stop: ["\n"], * tools: [...], * }); * * // When calling `.bindTools`, call options should be passed via the second argument * const llmWithTools = llm.bindTools( * [...], * { * tool_choice: "auto", * } * ); * ``` * * ## Examples * * <details open> * <summary><strong>Instantiate</strong></summary> * * ```typescript * import { AzureChatOpenAI } from '@langchain/openai'; * * const llm = new AzureChatOpenAI({ * azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY * azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME * azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME * azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION * temperature: 0, * maxTokens: undefined, * timeout: undefined, * maxRetries: 2, * // apiKey: "...", * // baseUrl: "...", * // other params... 
 *
 * ## Examples
 *
 * <details open>
 * <summary><strong>Instantiate</strong></summary>
 *
 * ```typescript
 * import { AzureChatOpenAI } from '@langchain/openai';
 *
 * const llm = new AzureChatOpenAI({
 *   azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY
 *   azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME
 *   azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME
 *   azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION
 *   temperature: 0,
 *   maxTokens: undefined,
 *   timeout: undefined,
 *   maxRetries: 2,
 *   // apiKey: "...",
 *   // baseUrl: "...",
 *   // other params...
 * });
 * ```
 * </details>
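 *
 * <br />
 *
 * <details>
 * <summary><strong>Instantiate with Microsoft Entra ID (Azure AD)</strong></summary>
 *
 * Instead of an API key, the constructor also accepts an `azureADTokenProvider`
 * (see the constructor below). A minimal sketch, assuming `@azure/identity` is
 * installed and the standard Cognitive Services token scope applies:
 *
 * ```typescript
 * import { AzureChatOpenAI } from '@langchain/openai';
 * import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity';
 *
 * const credential = new DefaultAzureCredential();
 * const azureADTokenProvider = getBearerTokenProvider(
 *   credential,
 *   'https://cognitiveservices.azure.com/.default'
 * );
 *
 * const llmWithEntraId = new AzureChatOpenAI({
 *   azureADTokenProvider,
 *   azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
 *   azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
 *   azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION,
 * });
 * ```
 * </details>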
 *
 * <br />
 *
 * <details>
 * <summary><strong>Invoking</strong></summary>
 *
 * ```typescript
 * const input = `Translate "I love programming" into French.`;
 *
 * // Models also accept a list of chat messages or a formatted prompt
 * const result = await llm.invoke(input);
 * console.log(result);
 * ```
 *
 * ```txt
 * AIMessage {
 *   "id": "chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz",
 *   "content": "J'adore la programmation.",
 *   "response_metadata": {
 *     "tokenUsage": {
 *       "completionTokens": 5,
 *       "promptTokens": 28,
 *       "totalTokens": 33
 *     },
 *     "finish_reason": "stop",
 *     "system_fingerprint": "fp_3aa7262c27"
 *   },
 *   "usage_metadata": {
 *     "input_tokens": 28,
 *     "output_tokens": 5,
 *     "total_tokens": 33
 *   }
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Streaming Chunks</strong></summary>
 *
 * ```typescript
 * for await (const chunk of await llm.stream(input)) {
 *   console.log(chunk);
 * }
 * ```
 *
 * ```txt
 * AIMessageChunk {
 *   "id": "chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs",
 *   "content": ""
 * }
 * AIMessageChunk {
 *   "content": "J"
 * }
 * AIMessageChunk {
 *   "content": "'adore"
 * }
 * AIMessageChunk {
 *   "content": " la"
 * }
 * AIMessageChunk {
 *   "content": " programmation"
 * }
 * AIMessageChunk {
 *   "content": "."
 * }
 * AIMessageChunk {
 *   "content": "",
 *   "response_metadata": {
 *     "finish_reason": "stop",
 *     "system_fingerprint": "fp_c9aa9c0491"
 *   }
 * }
 * AIMessageChunk {
 *   "content": "",
 *   "usage_metadata": {
 *     "input_tokens": 28,
 *     "output_tokens": 5,
 *     "total_tokens": 33
 *   }
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Aggregate Streamed Chunks</strong></summary>
 *
 * ```typescript
 * import { AIMessageChunk } from '@langchain/core/messages';
 * import { concat } from '@langchain/core/utils/stream';
 *
 * const stream = await llm.stream(input);
 * let full: AIMessageChunk | undefined;
 * for await (const chunk of stream) {
 *   full = !full ? chunk : concat(full, chunk);
 * }
 * console.log(full);
 * ```
 *
 * ```txt
 * AIMessageChunk {
 *   "id": "chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu",
 *   "content": "J'adore la programmation.",
 *   "response_metadata": {
 *     "prompt": 0,
 *     "completion": 0,
 *     "finish_reason": "stop"
 *   },
 *   "usage_metadata": {
 *     "input_tokens": 28,
 *     "output_tokens": 5,
 *     "total_tokens": 33
 *   }
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Bind tools</strong></summary>
 *
 * ```typescript
 * import { z } from 'zod';
 *
 * const GetWeather = {
 *   name: "GetWeather",
 *   description: "Get the current weather in a given location",
 *   schema: z.object({
 *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
 *   }),
 * }
 *
 * const GetPopulation = {
 *   name: "GetPopulation",
 *   description: "Get the current population in a given location",
 *   schema: z.object({
 *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
 *   }),
 * }
 *
 * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
 * const aiMsg = await llmWithTools.invoke(
 *   "Which city is hotter today and which is bigger: LA or NY?"
 * );
 * console.log(aiMsg.tool_calls);
 * ```
 *
 * ```txt
 * [
 *   {
 *     name: 'GetWeather',
 *     args: { location: 'Los Angeles, CA' },
 *     type: 'tool_call',
 *     id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK'
 *   },
 *   {
 *     name: 'GetWeather',
 *     args: { location: 'New York, NY' },
 *     type: 'tool_call',
 *     id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX'
 *   },
 *   {
 *     name: 'GetPopulation',
 *     args: { location: 'Los Angeles, CA' },
 *     type: 'tool_call',
 *     id: 'call_kL3OXxaq9OjIKqRTpvjaCH14'
 *   },
 *   {
 *     name: 'GetPopulation',
 *     args: { location: 'New York, NY' },
 *     type: 'tool_call',
 *     id: 'call_s9KQB1UWj45LLGaEnjz0179q'
 *   }
 * ]
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Structured Output</strong></summary>
 *
 * ```typescript
 * import { z } from 'zod';
 *
 * const Joke = z.object({
 *   setup: z.string().describe("The setup of the joke"),
 *   punchline: z.string().describe("The punchline to the joke"),
 *   rating: z.number().nullable().describe("How funny the joke is, from 1 to 10")
 * }).describe('Joke to tell user.');
 *
 * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
 * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
 * console.log(jokeResult);
 * ```
 *
 * ```txt
 * {
 *   setup: 'Why was the cat sitting on the computer?',
 *   punchline: 'Because it wanted to keep an eye on the mouse!',
 *   rating: 7
 * }
 * ```
 * </details>
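 *
 * <br />
 *
 * <details>
 * <summary><strong>Structured Output via JSON Schema</strong></summary>
 *
 * For `gpt-4o` models this class defaults `withStructuredOutput` to function
 * calling, because not all Azure `gpt-4o` deployments support JSON schema yet
 * (see `withStructuredOutput` below). A deployment that does support it can opt
 * back in explicitly. A minimal sketch, reusing the `Joke` schema above:
 *
 * ```typescript
 * const jsonSchemaLlm = llm.withStructuredOutput(Joke, {
 *   name: "Joke",
 *   method: "jsonSchema", // assumes the deployment supports JSON schema
 * });
 * const jsonSchemaJoke = await jsonSchemaLlm.invoke("Tell me a joke about dogs");
 * ```
 * </details>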
 *
 * <br />
 *
 * <details>
 * <summary><strong>JSON Object Response Format</strong></summary>
 *
 * ```typescript
 * const jsonLlm = llm.bind({ response_format: { type: "json_object" } });
 * const jsonLlmAiMsg = await jsonLlm.invoke(
 *   "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]"
 * );
 * console.log(jsonLlmAiMsg.content);
 * ```
 *
 * ```txt
 * {
 *   "randomInts": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Multimodal</strong></summary>
 *
 * ```typescript
 * import { HumanMessage } from '@langchain/core/messages';
 *
 * const imageUrl = "https://example.com/image.jpg";
 * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
 * const base64Image = Buffer.from(imageData).toString('base64');
 *
 * const message = new HumanMessage({
 *   content: [
 *     { type: "text", text: "describe the weather in this image" },
 *     {
 *       type: "image_url",
 *       image_url: { url: `data:image/jpeg;base64,${base64Image}` },
 *     },
 *   ]
 * });
 *
 * const imageDescriptionAiMsg = await llm.invoke([message]);
 * console.log(imageDescriptionAiMsg.content);
 * ```
 *
 * ```txt
 * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions.
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Usage Metadata</strong></summary>
 *
 * ```typescript
 * const aiMsgForMetadata = await llm.invoke(input);
 * console.log(aiMsgForMetadata.usage_metadata);
 * ```
 *
 * ```txt
 * { input_tokens: 28, output_tokens: 5, total_tokens: 33 }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Logprobs</strong></summary>
 *
 * ```typescript
 * const logprobsLlm = new AzureChatOpenAI({ logprobs: true });
 * const aiMsgForLogprobs = await logprobsLlm.invoke(input);
 * console.log(aiMsgForLogprobs.response_metadata.logprobs);
 * ```
 *
 * ```txt
 * {
 *   content: [
 *     {
 *       token: 'J',
 *       logprob: -0.000050616763,
 *       bytes: [Array],
 *       top_logprobs: []
 *     },
 *     {
 *       token: "'",
 *       logprob: -0.01868736,
 *       bytes: [Array],
 *       top_logprobs: []
 *     },
 *     {
 *       token: 'ad',
 *       logprob: -0.0000030545007,
 *       bytes: [Array],
 *       top_logprobs: []
 *     },
 *     { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] },
 *     {
 *       token: ' la',
 *       logprob: -0.515404,
 *       bytes: [Array],
 *       top_logprobs: []
 *     },
 *     {
 *       token: ' programm',
 *       logprob: -0.0000118755715,
 *       bytes: [Array],
 *       top_logprobs: []
 *     },
 *     { token: 'ation', logprob: 0, bytes: [Array], top_logprobs: [] },
 *     {
 *       token: '.',
 *       logprob: -0.0000037697225,
 *       bytes: [Array],
 *       top_logprobs: []
 *     }
 *   ],
 *   refusal: null
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Response Metadata</strong></summary>
 *
 * ```typescript
 * const aiMsgForResponseMetadata = await llm.invoke(input);
 * console.log(aiMsgForResponseMetadata.response_metadata);
 * ```
 *
 * ```txt
 * {
 *   tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 },
 *   finish_reason: 'stop',
 *   system_fingerprint: 'fp_3aa7262c27'
 * }
 * ```
 * </details>
 */
class AzureChatOpenAI extends chat_models_js_1.ChatOpenAI {
    _llmType() {
        return "azure_openai";
    }
    get lc_aliases() {
        return {
            ...super.lc_aliases,
            openAIApiKey: "openai_api_key",
            openAIApiVersion: "openai_api_version",
            openAIBasePath: "openai_api_base",
            deploymentName: "deployment_name",
            azureOpenAIEndpoint: "azure_endpoint",
            azureOpenAIApiVersion: "openai_api_version",
            azureOpenAIBasePath: "openai_api_base",
            azureOpenAIApiDeploymentName: "deployment_name",
        };
    }
    get lc_secrets() {
        return {
            ...super.lc_secrets,
            azureOpenAIApiKey: "AZURE_OPENAI_API_KEY",
        };
    }
    get lc_serializable_keys() {
        return [
            ...super.lc_serializable_keys,
            "azureOpenAIApiKey",
            "azureOpenAIApiVersion",
            "azureOpenAIBasePath",
            "azureOpenAIEndpoint",
            "azureOpenAIApiInstanceName",
            "azureOpenAIApiDeploymentName",
            "deploymentName",
            "openAIApiKey",
            "openAIApiVersion",
        ];
    }
    constructor(fields) {
        super(fields);
        Object.defineProperty(this, "azureOpenAIApiVersion", { enumerable: true, configurable: true, writable: true, value: void 0 });
        Object.defineProperty(this, "azureOpenAIApiKey", { enumerable: true, configurable: true, writable: true, value: void 0 });
        Object.defineProperty(this, "azureADTokenProvider", { enumerable: true, configurable: true, writable: true, value: void 0 });
        Object.defineProperty(this, "azureOpenAIApiInstanceName", { enumerable: true, configurable: true, writable: true, value: void 0 });
        Object.defineProperty(this, "azureOpenAIApiDeploymentName", { enumerable: true, configurable: true, writable: true, value: void 0 });
        Object.defineProperty(this, "azureOpenAIBasePath", { enumerable: true, configurable: true, writable: true, value: void 0 });
        Object.defineProperty(this, "azureOpenAIEndpoint", { enumerable: true, configurable: true, writable: true, value: void 0 });
        // Resolve each Azure setting from explicit fields first, then legacy
        // aliases, then environment variables.
        this.azureOpenAIApiKey = fields?.azureOpenAIApiKey ??
            fields?.openAIApiKey ??
            fields?.apiKey ??
            (0, env_1.getEnvironmentVariable)("AZURE_OPENAI_API_KEY");
        this.azureOpenAIApiInstanceName = fields?.azureOpenAIApiInstanceName ??
            (0, env_1.getEnvironmentVariable)("AZURE_OPENAI_API_INSTANCE_NAME");
        this.azureOpenAIApiDeploymentName = fields?.azureOpenAIApiDeploymentName ??
            fields?.deploymentName ??
            (0, env_1.getEnvironmentVariable)("AZURE_OPENAI_API_DEPLOYMENT_NAME");
        this.azureOpenAIApiVersion = fields?.azureOpenAIApiVersion ??
            fields?.openAIApiVersion ??
            (0, env_1.getEnvironmentVariable)("AZURE_OPENAI_API_VERSION");
        this.azureOpenAIBasePath = fields?.azureOpenAIBasePath ??
            (0, env_1.getEnvironmentVariable)("AZURE_OPENAI_BASE_PATH");
        this.azureOpenAIEndpoint = fields?.azureOpenAIEndpoint ??
            (0, env_1.getEnvironmentVariable)("AZURE_OPENAI_ENDPOINT");
        this.azureADTokenProvider = fields?.azureADTokenProvider;
        if (!this.azureOpenAIApiKey && !this.apiKey && !this.azureADTokenProvider) {
            throw new Error("Azure OpenAI API key or Token Provider not found");
        }
    }
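    /** Tags traced runs with "azure" as the LangSmith provider. */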
    getLsParams(options) {
        const params = super.getLsParams(options);
        params.ls_provider = "azure";
        return params;
    }
    _getClientOptions(options) {
        if (!this.client) {
            const openAIEndpointConfig = {
                azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
                azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
                azureOpenAIApiKey: this.azureOpenAIApiKey,
                azureOpenAIBasePath: this.azureOpenAIBasePath,
                azureADTokenProvider: this.azureADTokenProvider,
                baseURL: this.clientConfig.baseURL,
                azureOpenAIEndpoint: this.azureOpenAIEndpoint,
            };
            const endpoint = (0, azure_js_1.getEndpoint)(openAIEndpointConfig);
            const params = {
                ...this.clientConfig,
                baseURL: endpoint,
                timeout: this.timeout,
                maxRetries: 0,
            };
            if (!this.azureADTokenProvider) {
                params.apiKey = openAIEndpointConfig.azureOpenAIApiKey;
            }
            if (!params.baseURL) {
                delete params.baseURL;
            }
            let env = (0, env_1.getEnv)();
            if (env === "node" || env === "deno") {
                env = `(${env}/${process.version}; ${process.platform}; ${process.arch})`;
            }
            const specifiedUserAgent = params.defaultHeaders?.["User-Agent"];
            params.defaultHeaders = {
                ...params.defaultHeaders,
                "User-Agent": `langchainjs-azure-openai/2.0.0 (${env})${specifiedUserAgent ? ` ${specifiedUserAgent}` : ""}`,
            };
            this.client = new openai_1.AzureOpenAI({
                apiVersion: this.azureOpenAIApiVersion,
                azureADTokenProvider: this.azureADTokenProvider,
                deployment: this.azureOpenAIApiDeploymentName,
                ...params,
            });
        }
        const requestOptions = {
            ...this.clientConfig,
            ...options,
        };
        if (this.azureOpenAIApiKey) {
            requestOptions.headers = {
                "api-key": this.azureOpenAIApiKey,
                ...requestOptions.headers,
            };
            requestOptions.query = {
                "api-version": this.azureOpenAIApiVersion,
                ...requestOptions.query,
            };
        }
        return requestOptions;
    }
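    /**
     * Normalizes the serialized form: drops key material and legacy kwargs,
     * and reconstructs `azure_endpoint` / `deployment_name` from the base
     * path or instance name when they were not set explicitly.
     */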
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    toJSON() {
        const json = super.toJSON();
        function isRecord(obj) {
            return typeof obj === "object" && obj != null;
        }
        if (isRecord(json) && isRecord(json.kwargs)) {
            delete json.kwargs.azure_openai_base_path;
            delete json.kwargs.azure_openai_api_deployment_name;
            delete json.kwargs.azure_openai_api_key;
            delete json.kwargs.azure_openai_api_version;
            delete json.kwargs.azure_open_ai_base_path;
            if (!json.kwargs.azure_endpoint && this.azureOpenAIEndpoint) {
                json.kwargs.azure_endpoint = this.azureOpenAIEndpoint;
            }
            if (!json.kwargs.azure_endpoint && this.azureOpenAIBasePath) {
                const parts = this.azureOpenAIBasePath.split("/openai/deployments/");
                if (parts.length === 2 && parts[0].startsWith("http")) {
                    const [endpoint] = parts;
                    json.kwargs.azure_endpoint = endpoint;
                }
            }
            if (!json.kwargs.azure_endpoint && this.azureOpenAIApiInstanceName) {
                json.kwargs.azure_endpoint = `https://${this.azureOpenAIApiInstanceName}.openai.azure.com/`;
            }
            if (!json.kwargs.deployment_name && this.azureOpenAIApiDeploymentName) {
                json.kwargs.deployment_name = this.azureOpenAIApiDeploymentName;
            }
            if (!json.kwargs.deployment_name && this.azureOpenAIBasePath) {
                const parts = this.azureOpenAIBasePath.split("/openai/deployments/");
                if (parts.length === 2) {
                    const [, deployment] = parts;
                    json.kwargs.deployment_name = deployment;
                }
            }
            if (json.kwargs.azure_endpoint &&
                json.kwargs.deployment_name &&
                json.kwargs.openai_api_base) {
                delete json.kwargs.openai_api_base;
            }
            if (json.kwargs.azure_openai_api_instance_name && json.kwargs.azure_endpoint) {
                delete json.kwargs.azure_openai_api_instance_name;
            }
        }
        return json;
    }
    withStructuredOutput(outputSchema, config) {
        const ensuredConfig = { ...config };
        // Not all Azure gpt-4o deployments support JSON schema yet, so default
        // to function calling unless a method was requested explicitly.
        if (this.model.startsWith("gpt-4o")) {
            if (ensuredConfig?.method === undefined) {
                ensuredConfig.method = "functionCalling";
            }
        }
        return super.withStructuredOutput(outputSchema, ensuredConfig);
    }
}
exports.AzureChatOpenAI = AzureChatOpenAI;