UNPKG

@langchain/openai

Version:
1,489 lines 83 kB
import { OpenAI as OpenAIClient } from "openai"; import { AIMessage, AIMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessageChunk, isAIMessage, } from "@langchain/core/messages"; import { ChatGenerationChunk, } from "@langchain/core/outputs"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { BaseChatModel, } from "@langchain/core/language_models/chat_models"; import { isOpenAITool, } from "@langchain/core/language_models/base"; import { RunnableLambda, RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { JsonOutputParser, StructuredOutputParser, } from "@langchain/core/output_parsers"; import { JsonOutputKeyToolsParser, convertLangChainToolCallToOpenAI, makeInvalidToolCall, parseToolCall, } from "@langchain/core/output_parsers/openai_tools"; import { zodToJsonSchema } from "zod-to-json-schema"; import { zodResponseFormat } from "openai/helpers/zod"; import { getEndpoint } from "./utils/azure.js"; import { formatToOpenAIToolChoice, wrapOpenAIClientError, } from "./utils/openai.js"; import { formatFunctionDefinitions, } from "./utils/openai-format-fndef.js"; import { _convertToOpenAITool } from "./utils/tools.js"; function extractGenericMessageCustomRole(message) { if (message.role !== "system" && message.role !== "developer" && message.role !== "assistant" && message.role !== "user" && message.role !== "function" && message.role !== "tool") { console.warn(`Unknown message role: ${message.role}`); } return message.role; } export function messageToOpenAIRole(message) { const type = message._getType(); switch (type) { case "system": return "system"; case "ai": return "assistant"; case "human": return "user"; case "function": return "function"; case "tool": return "tool"; case "generic": { if (!ChatMessage.isInstance(message)) throw new Error("Invalid generic chat message"); return extractGenericMessageCustomRole(message); } default: throw new Error(`Unknown message type: ${type}`); } } // Used in LangSmith, export is important here export function _convertMessagesToOpenAIParams(messages, model) { // TODO: Function messages do not support array content, fix cast return messages.flatMap((message) => { let role = messageToOpenAIRole(message); if (role === "system" && isReasoningModel(model)) { role = "developer"; } // eslint-disable-next-line @typescript-eslint/no-explicit-any const completionParam = { role, content: message.content, }; if (message.name != null) { completionParam.name = message.name; } if (message.additional_kwargs.function_call != null) { completionParam.function_call = message.additional_kwargs.function_call; completionParam.content = ""; } if (isAIMessage(message) && !!message.tool_calls?.length) { completionParam.tool_calls = message.tool_calls.map(convertLangChainToolCallToOpenAI); completionParam.content = ""; } else { if (message.additional_kwargs.tool_calls != null) { completionParam.tool_calls = message.additional_kwargs.tool_calls; } if (message.tool_call_id != null) { completionParam.tool_call_id = message.tool_call_id; } } if (message.additional_kwargs.audio && typeof message.additional_kwargs.audio === "object" && "id" in message.additional_kwargs.audio) { const audioMessage = { role: "assistant", audio: { id: message.additional_kwargs.audio.id, }, }; return [completionParam, audioMessage]; } return completionParam; }); } const _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"; function _convertMessagesToOpenAIResponsesParams(messages, model) { return messages.flatMap((lcMsg) => { let role = messageToOpenAIRole(lcMsg); if (role === "system" && isReasoningModel(model)) role = "developer"; if (role === "function") { throw new Error("Function messages are not supported in Responses API"); } if (role === "tool") { const toolMessage = lcMsg; // Handle computer call output if (toolMessage.additional_kwargs?.type === "computer_call_output") { const output = (() => { if (typeof toolMessage.content === "string") { return { type: "computer_screenshot", image_url: toolMessage.content, }; } if (Array.isArray(toolMessage.content)) { const oaiScreenshot = toolMessage.content.find((i) => i.type === "computer_screenshot"); if (oaiScreenshot) return oaiScreenshot; const lcImage = toolMessage.content.find((i) => i.type === "image_url"); if (lcImage) { return { type: "computer_screenshot", image_url: typeof lcImage.image_url === "string" ? lcImage.image_url : lcImage.image_url.url, }; } } throw new Error("Invalid computer call output"); })(); return { type: "computer_call_output", output, call_id: toolMessage.tool_call_id, }; } return { type: "function_call_output", call_id: toolMessage.tool_call_id, id: toolMessage.id, output: typeof toolMessage.content !== "string" ? JSON.stringify(toolMessage.content) : toolMessage.content, }; } if (role === "assistant") { const input = []; // reasoning items if (lcMsg.additional_kwargs.reasoning != null) { const isReasoningItem = (item) => typeof item === "object" && item != null && "type" in item && item.type === "reasoning"; if (isReasoningItem(lcMsg.additional_kwargs.reasoning)) { input.push(lcMsg.additional_kwargs.reasoning); } } // ai content let { content } = lcMsg; if (lcMsg.additional_kwargs.refusal != null) { if (typeof content === "string") { content = [{ type: "output_text", text: content, annotations: [] }]; } content = [ ...content, { type: "refusal", refusal: lcMsg.additional_kwargs.refusal }, ]; } input.push({ type: "message", role: "assistant", content: typeof content === "string" ? content : content.flatMap((item) => { if (item.type === "text") { return { type: "output_text", text: item.text, // @ts-expect-error TODO: add types for `annotations` annotations: item.annotations ?? [], }; } if (item.type === "output_text" || item.type === "refusal") { return item; } return []; }), }); // function tool calls and computer use tool calls const functionCallIds = // eslint-disable-next-line @typescript-eslint/no-use-before-define lcMsg.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY]; if (isAIMessage(lcMsg) && !!lcMsg.tool_calls?.length) { input.push(...lcMsg.tool_calls.map((toolCall) => ({ type: "function_call", name: toolCall.name, arguments: JSON.stringify(toolCall.args), call_id: toolCall.id, // @ts-expect-error Might come from a non-Responses API message id: functionCallIds?.[toolCall.id], }))); } else if (lcMsg.additional_kwargs.tool_calls != null) { input.push(...lcMsg.additional_kwargs.tool_calls.map((toolCall) => ({ type: "function_call", name: toolCall.function.name, call_id: toolCall.id, // @ts-expect-error Might come from a non-Responses API message id: functionCallIds?.[toolCall.id], arguments: toolCall.function.arguments, }))); } const toolOutputs = lcMsg.response_metadata.output?.length ? lcMsg.response_metadata.output : lcMsg.additional_kwargs.tool_outputs; if (toolOutputs != null) { const castToolOutputs = toolOutputs; const reasoningCalls = castToolOutputs?.filter((item) => item.type === "reasoning"); const computerCalls = castToolOutputs?.filter((item) => item.type === "computer_call"); // NOTE: Reasoning outputs must be passed to the model BEFORE computer calls. if (reasoningCalls.length > 0 && computerCalls.length > 0) { input.push(...reasoningCalls); } if (computerCalls.length > 0) input.push(...computerCalls); } return input; } if (role === "user") { return { type: "message", role: "user", content: typeof lcMsg.content === "string" ? lcMsg.content : lcMsg.content.flatMap((item) => { if (item.type === "text") { return { type: "input_text", text: item.text }; } if (item.type === "image_url") { const image_url = typeof item.image_url === "string" ? item.image_url : item.image_url.url; const detail = typeof item.image_url === "string" ? "auto" : item.image_url.detail; return { type: "input_image", image_url, detail }; } if (item.type === "input_text" || item.type === "input_image" || item.type === "input_file") { return item; } return []; }), }; } return []; }); } function _convertOpenAIResponsesMessageToBaseMessage(response) { if (response.error) { // TODO: add support for `addLangChainErrorFields` const error = new Error(response.error.message); error.name = response.error.code; throw error; } const content = []; const tool_calls = []; const invalid_tool_calls = []; const response_metadata = { model: response.model, created_at: response.created_at, id: response.id, incomplete_details: response.incomplete_details, metadata: response.metadata, object: response.object, status: response.status, user: response.user, // for compatibility with chat completion calls. model_name: response.model, }; const additional_kwargs = {}; for (const item of response.output) { if (item.type === "message") { content.push(...item.content.flatMap((part) => { if (part.type === "output_text") { if ("parsed" in part && part.parsed != null) { additional_kwargs.parsed = part.parsed; } return { type: "text", text: part.text, annotations: part.annotations, }; } if (part.type === "refusal") { additional_kwargs.refusal = part.refusal; return []; } return part; })); } else if (item.type === "function_call") { const fnAdapter = { function: { name: item.name, arguments: item.arguments }, id: item.call_id, }; try { tool_calls.push(parseToolCall(fnAdapter, { returnId: true })); } catch (e) { let errMessage; if (typeof e === "object" && e != null && "message" in e && typeof e.message === "string") { errMessage = e.message; } invalid_tool_calls.push(makeInvalidToolCall(fnAdapter, errMessage)); } additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] ??= {}; additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][item.call_id] = item.id; } else if (item.type === "reasoning") { additional_kwargs.reasoning = item; } else { additional_kwargs.tool_outputs ??= []; additional_kwargs.tool_outputs.push(item); } } return new AIMessage({ id: response.id, content, tool_calls, invalid_tool_calls, usage_metadata: response.usage, additional_kwargs, response_metadata, }); } function _convertOpenAIResponsesDeltaToBaseMessageChunk(chunk) { const content = []; let generationInfo = {}; let usage_metadata; const tool_call_chunks = []; const response_metadata = {}; const additional_kwargs = {}; let id; if (chunk.type === "response.output_text.delta") { content.push({ type: "text", text: chunk.delta, index: chunk.content_index, }); } else if (chunk.type === "response.output_text.annotation.added") { content.push({ type: "text", text: "", annotations: [chunk.annotation], index: chunk.content_index, }); } else if (chunk.type === "response.output_item.added" && chunk.item.type === "message") { id = chunk.item.id; } else if (chunk.type === "response.output_item.added" && chunk.item.type === "function_call") { tool_call_chunks.push({ type: "tool_call_chunk", name: chunk.item.name, args: chunk.item.arguments, id: chunk.item.id, index: chunk.output_index, }); additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = { [chunk.item.call_id]: chunk.item.id, }; } else if (chunk.type === "response.output_item.done" && (chunk.item.type === "web_search_call" || chunk.item.type === "file_search_call" || chunk.item.type === "computer_call")) { additional_kwargs.tool_outputs = [chunk.item]; } else if (chunk.type === "response.created") { response_metadata.id = chunk.response.id; response_metadata.model_name = chunk.response.model; response_metadata.model = chunk.response.model; } else if (chunk.type === "response.completed") { const msg = _convertOpenAIResponsesMessageToBaseMessage(chunk.response); usage_metadata = chunk.response.usage; if (chunk.response.text?.format?.type === "json_schema") { additional_kwargs.parsed ??= JSON.parse(msg.text); } for (const [key, value] of Object.entries(chunk.response)) { if (key !== "id") response_metadata[key] = value; } } else if (chunk.type === "response.function_call_arguments.delta") { tool_call_chunks.push({ type: "tool_call_chunk", args: chunk.delta, index: chunk.output_index, }); } else if (chunk.type === "response.web_search_call.completed" || chunk.type === "response.file_search_call.completed") { generationInfo = { tool_outputs: { id: chunk.item_id, type: chunk.type.replace("response.", "").replace(".completed", ""), status: "completed", }, }; } else if (chunk.type === "response.refusal.done") { additional_kwargs.refusal = chunk.refusal; } else { return null; } return new ChatGenerationChunk({ // Legacy reasons, `onLLMNewToken` should pulls this out text: content.map((part) => part.text).join(""), message: new AIMessageChunk({ id, content, tool_call_chunks, usage_metadata, additional_kwargs, response_metadata, }), generationInfo, }); } function isBuiltInTool(tool) { return "type" in tool && tool.type !== "function"; } function isBuiltInToolChoice(tool_choice) { return (tool_choice != null && typeof tool_choice === "object" && "type" in tool_choice && tool_choice.type !== "function"); } function _convertChatOpenAIToolTypeToOpenAITool(tool, fields) { if (isOpenAITool(tool)) { if (fields?.strict !== undefined) { return { ...tool, function: { ...tool.function, strict: fields.strict, }, }; } return tool; } return _convertToOpenAITool(tool, fields); } function isReasoningModel(model) { return model?.startsWith("o1") || model?.startsWith("o3"); } /** * OpenAI chat model integration. * * To use with Azure, import the `AzureChatOpenAI` class. * * Setup: * Install `@langchain/openai` and set an environment variable named `OPENAI_API_KEY`. * * ```bash * npm install @langchain/openai * export OPENAI_API_KEY="your-api-key" * ``` * * ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html#constructor) * * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html) * * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc. * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below: * * ```typescript * // When calling `.bind`, call options should be passed via the first argument * const llmWithArgsBound = llm.bind({ * stop: ["\n"], * tools: [...], * }); * * // When calling `.bindTools`, call options should be passed via the second argument * const llmWithTools = llm.bindTools( * [...], * { * tool_choice: "auto", * } * ); * ``` * * ## Examples * * <details open> * <summary><strong>Instantiate</strong></summary> * * ```typescript * import { ChatOpenAI } from '@langchain/openai'; * * const llm = new ChatOpenAI({ * model: "gpt-4o", * temperature: 0, * maxTokens: undefined, * timeout: undefined, * maxRetries: 2, * // apiKey: "...", * // baseUrl: "...", * // organization: "...", * // other params... * }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Invoking</strong></summary> * * ```typescript * const input = `Translate "I love programming" into French.`; * * // Models also accept a list of chat messages or a formatted prompt * const result = await llm.invoke(input); * console.log(result); * ``` * * ```txt * AIMessage { * "id": "chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz", * "content": "J'adore la programmation.", * "response_metadata": { * "tokenUsage": { * "completionTokens": 5, * "promptTokens": 28, * "totalTokens": 33 * }, * "finish_reason": "stop", * "system_fingerprint": "fp_3aa7262c27" * }, * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Streaming Chunks</strong></summary> * * ```typescript * for await (const chunk of await llm.stream(input)) { * console.log(chunk); * } * ``` * * ```txt * AIMessageChunk { * "id": "chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs", * "content": "" * } * AIMessageChunk { * "content": "J" * } * AIMessageChunk { * "content": "'adore" * } * AIMessageChunk { * "content": " la" * } * AIMessageChunk { * "content": " programmation",, * } * AIMessageChunk { * "content": ".",, * } * AIMessageChunk { * "content": "", * "response_metadata": { * "finish_reason": "stop", * "system_fingerprint": "fp_c9aa9c0491" * }, * } * AIMessageChunk { * "content": "", * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Aggregate Streamed Chunks</strong></summary> * * ```typescript * import { AIMessageChunk } from '@langchain/core/messages'; * import { concat } from '@langchain/core/utils/stream'; * * const stream = await llm.stream(input); * let full: AIMessageChunk | undefined; * for await (const chunk of stream) { * full = !full ? chunk : concat(full, chunk); * } * console.log(full); * ``` * * ```txt * AIMessageChunk { * "id": "chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu", * "content": "J'adore la programmation.", * "response_metadata": { * "prompt": 0, * "completion": 0, * "finish_reason": "stop", * }, * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Bind tools</strong></summary> * * ```typescript * import { z } from 'zod'; * * const GetWeather = { * name: "GetWeather", * description: "Get the current weather in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const GetPopulation = { * name: "GetPopulation", * description: "Get the current population in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const llmWithTools = llm.bindTools( * [GetWeather, GetPopulation], * { * // strict: true // enforce tool args schema is respected * } * ); * const aiMsg = await llmWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?" * ); * console.log(aiMsg.tool_calls); * ``` * * ```txt * [ * { * name: 'GetWeather', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK' * }, * { * name: 'GetWeather', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX' * }, * { * name: 'GetPopulation', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_kL3OXxaq9OjIKqRTpvjaCH14' * }, * { * name: 'GetPopulation', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_s9KQB1UWj45LLGaEnjz0179q' * } * ] * ``` * </details> * * <br /> * * <details> * <summary><strong>Structured Output</strong></summary> * * ```typescript * import { z } from 'zod'; * * const Joke = z.object({ * setup: z.string().describe("The setup of the joke"), * punchline: z.string().describe("The punchline to the joke"), * rating: z.number().nullable().describe("How funny the joke is, from 1 to 10") * }).describe('Joke to tell user.'); * * const structuredLlm = llm.withStructuredOutput(Joke, { * name: "Joke", * strict: true, // Optionally enable OpenAI structured outputs * }); * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); * console.log(jokeResult); * ``` * * ```txt * { * setup: 'Why was the cat sitting on the computer?', * punchline: 'Because it wanted to keep an eye on the mouse!', * rating: 7 * } * ``` * </details> * * <br /> * * <details> * <summary><strong>JSON Object Response Format</strong></summary> * * ```typescript * const jsonLlm = llm.bind({ response_format: { type: "json_object" } }); * const jsonLlmAiMsg = await jsonLlm.invoke( * "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]" * ); * console.log(jsonLlmAiMsg.content); * ``` * * ```txt * { * "randomInts": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Multimodal</strong></summary> * * ```typescript * import { HumanMessage } from '@langchain/core/messages'; * * const imageUrl = "https://example.com/image.jpg"; * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer()); * const base64Image = Buffer.from(imageData).toString('base64'); * * const message = new HumanMessage({ * content: [ * { type: "text", text: "describe the weather in this image" }, * { * type: "image_url", * image_url: { url: `data:image/jpeg;base64,${base64Image}` }, * }, * ] * }); * * const imageDescriptionAiMsg = await llm.invoke([message]); * console.log(imageDescriptionAiMsg.content); * ``` * * ```txt * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions. * ``` * </details> * * <br /> * * <details> * <summary><strong>Usage Metadata</strong></summary> * * ```typescript * const aiMsgForMetadata = await llm.invoke(input); * console.log(aiMsgForMetadata.usage_metadata); * ``` * * ```txt * { input_tokens: 28, output_tokens: 5, total_tokens: 33 } * ``` * </details> * * <br /> * * <details> * <summary><strong>Logprobs</strong></summary> * * ```typescript * const logprobsLlm = new ChatOpenAI({ logprobs: true }); * const aiMsgForLogprobs = await logprobsLlm.invoke(input); * console.log(aiMsgForLogprobs.response_metadata.logprobs); * ``` * * ```txt * { * content: [ * { * token: 'J', * logprob: -0.000050616763, * bytes: [Array], * top_logprobs: [] * }, * { * token: "'", * logprob: -0.01868736, * bytes: [Array], * top_logprobs: [] * }, * { * token: 'ad', * logprob: -0.0000030545007, * bytes: [Array], * top_logprobs: [] * }, * { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] }, * { * token: ' la', * logprob: -0.515404, * bytes: [Array], * top_logprobs: [] * }, * { * token: ' programm', * logprob: -0.0000118755715, * bytes: [Array], * top_logprobs: [] * }, * { token: 'ation', logprob: 0, bytes: [Array], top_logprobs: [] }, * { * token: '.', * logprob: -0.0000037697225, * bytes: [Array], * top_logprobs: [] * } * ], * refusal: null * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Response Metadata</strong></summary> * * ```typescript * const aiMsgForResponseMetadata = await llm.invoke(input); * console.log(aiMsgForResponseMetadata.response_metadata); * ``` * * ```txt * { * tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 }, * finish_reason: 'stop', * system_fingerprint: 'fp_3aa7262c27' * } * ``` * </details> * * <br /> * * <details> * <summary><strong>JSON Schema Structured Output</strong></summary> * * ```typescript * const llmForJsonSchema = new ChatOpenAI({ * model: "gpt-4o-2024-08-06", * }).withStructuredOutput( * z.object({ * command: z.string().describe("The command to execute"), * expectedOutput: z.string().describe("The expected output of the command"), * options: z * .array(z.string()) * .describe("The options you can pass to the command"), * }), * { * method: "jsonSchema", * strict: true, // Optional when using the `jsonSchema` method * } * ); * * const jsonSchemaRes = await llmForJsonSchema.invoke( * "What is the command to list files in a directory?" * ); * console.log(jsonSchemaRes); * ``` * * ```txt * { * command: 'ls', * expectedOutput: 'A list of files and subdirectories within the specified directory.', * options: [ * '-a: include directory entries whose names begin with a dot (.).', * '-l: use a long listing format.', * '-h: with -l, print sizes in human readable format (e.g., 1K, 234M, 2G).', * '-t: sort by time, newest first.', * '-r: reverse order while sorting.', * '-S: sort by file size, largest first.', * '-R: list subdirectories recursively.' * ] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Audio Outputs</strong></summary> * * ```typescript * import { ChatOpenAI } from "@langchain/openai"; * * const modelWithAudioOutput = new ChatOpenAI({ * model: "gpt-4o-audio-preview", * // You may also pass these fields to `.bind` as a call argument. * modalities: ["text", "audio"], // Specifies that the model should output audio. * audio: { * voice: "alloy", * format: "wav", * }, * }); * * const audioOutputResult = await modelWithAudioOutput.invoke("Tell me a joke about cats."); * const castMessageContent = audioOutputResult.content[0] as Record<string, any>; * * console.log({ * ...castMessageContent, * data: castMessageContent.data.slice(0, 100) // Sliced for brevity * }) * ``` * * ```txt * { * id: 'audio_67117718c6008190a3afad3e3054b9b6', * data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg', * expires_at: 1729201448, * transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!' * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Audio Outputs</strong></summary> * * ```typescript * import { ChatOpenAI } from "@langchain/openai"; * * const modelWithAudioOutput = new ChatOpenAI({ * model: "gpt-4o-audio-preview", * // You may also pass these fields to `.bind` as a call argument. * modalities: ["text", "audio"], // Specifies that the model should output audio. * audio: { * voice: "alloy", * format: "wav", * }, * }); * * const audioOutputResult = await modelWithAudioOutput.invoke("Tell me a joke about cats."); * const castAudioContent = audioOutputResult.additional_kwargs.audio as Record<string, any>; * * console.log({ * ...castAudioContent, * data: castAudioContent.data.slice(0, 100) // Sliced for brevity * }) * ``` * * ```txt * { * id: 'audio_67117718c6008190a3afad3e3054b9b6', * data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg', * expires_at: 1729201448, * transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!' * } * ``` * </details> * * <br /> */ export class ChatOpenAI extends BaseChatModel { static lc_name() { return "ChatOpenAI"; } get callKeys() { return [ ...super.callKeys, "options", "function_call", "functions", "tools", "tool_choice", "promptIndex", "response_format", "seed", "reasoning_effort", ]; } get lc_secrets() { return { openAIApiKey: "OPENAI_API_KEY", apiKey: "OPENAI_API_KEY", organization: "OPENAI_ORGANIZATION", }; } get lc_aliases() { return { modelName: "model", openAIApiKey: "openai_api_key", apiKey: "openai_api_key", }; } get lc_serializable_keys() { return [ "configuration", "logprobs", "topLogprobs", "prefixMessages", "supportsStrictToolCalling", "modalities", "audio", "reasoningEffort", "temperature", "maxTokens", "topP", "frequencyPenalty", "presencePenalty", "n", "logitBias", "user", "streaming", "streamUsage", "modelName", "model", "modelKwargs", "stop", "stopSequences", "timeout", "openAIApiKey", "apiKey", "cache", "maxConcurrency", "maxRetries", "verbose", "callbacks", "tags", "metadata", "disableStreaming", ]; } constructor(fields) { super(fields ?? {}); Object.defineProperty(this, "lc_serializable", { enumerable: true, configurable: true, writable: true, value: true }); Object.defineProperty(this, "temperature", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "topP", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "frequencyPenalty", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "presencePenalty", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "n", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "logitBias", { enumerable: true, configurable: true, writable: true, value: void 0 }); /** @deprecated Use "model" instead */ Object.defineProperty(this, "modelName", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "model", { enumerable: true, configurable: true, writable: true, value: "gpt-3.5-turbo" }); Object.defineProperty(this, "modelKwargs", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "stop", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "stopSequences", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "user", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "timeout", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "streaming", { enumerable: true, configurable: true, writable: true, value: false }); Object.defineProperty(this, "streamUsage", { enumerable: true, configurable: true, writable: true, value: true }); Object.defineProperty(this, "maxTokens", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "logprobs", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "topLogprobs", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "openAIApiKey", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "apiKey", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "organization", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "__includeRawResponse", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "client", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "clientConfig", { enumerable: true, configurable: true, writable: true, value: void 0 }); /** * Whether the model supports the `strict` argument when passing in tools. * If `undefined` the `strict` argument will not be passed to OpenAI. */ Object.defineProperty(this, "supportsStrictToolCalling", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "audio", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "modalities", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "reasoningEffort", { enumerable: true, configurable: true, writable: true, value: void 0 }); Object.defineProperty(this, "useResponsesApi", { enumerable: true, configurable: true, writable: true, value: false }); this.openAIApiKey = fields?.apiKey ?? fields?.openAIApiKey ?? fields?.configuration?.apiKey ?? getEnvironmentVariable("OPENAI_API_KEY"); this.apiKey = this.openAIApiKey; this.organization = fields?.configuration?.organization ?? getEnvironmentVariable("OPENAI_ORGANIZATION"); this.model = fields?.model ?? fields?.modelName ?? this.model; this.modelName = this.model; this.modelKwargs = fields?.modelKwargs ?? {}; this.timeout = fields?.timeout; this.temperature = fields?.temperature ?? this.temperature; this.topP = fields?.topP ?? this.topP; this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty; this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty; this.logprobs = fields?.logprobs; this.topLogprobs = fields?.topLogprobs; this.n = fields?.n ?? this.n; this.logitBias = fields?.logitBias; this.stop = fields?.stopSequences ?? fields?.stop; this.stopSequences = this.stop; this.user = fields?.user; this.__includeRawResponse = fields?.__includeRawResponse; this.audio = fields?.audio; this.modalities = fields?.modalities; this.reasoningEffort = fields?.reasoningEffort; this.maxTokens = fields?.maxCompletionTokens ?? fields?.maxTokens; this.useResponsesApi = fields?.useResponsesApi ?? this.useResponsesApi; if (this.model === "o1") { this.disableStreaming = true; } this.streaming = fields?.streaming ?? false; this.streamUsage = fields?.streamUsage ?? this.streamUsage; this.clientConfig = { apiKey: this.apiKey, organization: this.organization, dangerouslyAllowBrowser: true, ...fields?.configuration, }; // If `supportsStrictToolCalling` is explicitly set, use that value. // Else leave undefined so it's not passed to OpenAI. if (fields?.supportsStrictToolCalling !== undefined) { this.supportsStrictToolCalling = fields.supportsStrictToolCalling; } } getLsParams(options) { const params = this.invocationParams(options); return { ls_provider: "openai", ls_model_name: this.model, ls_model_type: "chat", ls_temperature: params.temperature ?? undefined, ls_max_tokens: params.max_tokens ?? undefined, ls_stop: options.stop, }; } bindTools(tools, kwargs) { let strict; if (kwargs?.strict !== undefined) { strict = kwargs.strict; } else if (this.supportsStrictToolCalling !== undefined) { strict = this.supportsStrictToolCalling; } return this.bind({ tools: tools.map((tool) => isBuiltInTool(tool) ? tool : _convertChatOpenAIToolTypeToOpenAITool(tool, { strict })), ...kwargs, }); } createResponseFormat(resFormat) { if (resFormat && resFormat.type === "json_schema" && resFormat.json_schema.schema && isZodSchema(resFormat.json_schema.schema)) { return zodResponseFormat(resFormat.json_schema.schema, resFormat.json_schema.name, { description: resFormat.json_schema.description, }); } return resFormat; } /** * Get the parameters used to invoke the model */ invocationParams(options, extra) { let strict; if (options?.strict !== undefined) { strict = options.strict; } else if (this.supportsStrictToolCalling !== undefined) { strict = this.supportsStrictToolCalling; } if (this._useResponseApi(options)) { const params = { model: this.model, temperature: this.temperature, top_p: this.topP, user: this.user, // if include_usage is set or streamUsage then stream must be set to true. stream: this.streaming, previous_response_id: options?.previous_response_id, truncation: options?.truncation, include: options?.include, tools: options?.tools?.length ? options.tools .map((tool) => { if (isBuiltInTool(tool)) { return tool; } else if (isOpenAITool(tool)) { return { type: "function", name: tool.function.name, parameters: tool.function.parameters, description: tool.function.description, strict, }; } else { return null; } }) .filter((tool) => tool !== null) : undefined, tool_choice: isBuiltInToolChoice(options?.tool_choice) ? options?.tool_choice : (() => { const formatted = formatToOpenAIToolChoice(options?.tool_choice); if (typeof formatted === "object" && "type" in formatted) { return { type: "function", name: formatted.function.name }; } else { return undefined; } })(), text: (() => { if (options?.text) return options.text; const format = this.createResponseFormat(options?.response_format); if (format?.type === "json_schema") { if (format.json_schema.schema != null) { return { format: { type: "json_schema", schema: format.json_schema.schema, description: format.json_schema.description, name: format.json_schema.name, strict: format.json_schema.strict, }, }; } return undefined; } return { format }; })(), parallel_tool_calls: options?.parallel_tool_calls, max_output_tokens: this.maxTokens === -1 ? undefined : this.maxTokens, ...this.modelKwargs, }; const reasoningEffort = options?.reasoning_effort ?? this.reasoningEffort; if (reasoningEffort !== undefined) { params.reasoning = { effort: reasoningEffort }; } // eslint-disable-next-line @typescript-eslint/no-explicit-any return params; } let streamOptionsConfig = {}; if (options?.stream_options !== undefined) { streamOptionsConfig = { stream_options: options.stream_options }; } else if (this.streamUsage && (this.streaming || extra?.streaming)) { streamOptionsConfig = { stream_options: { include_usage: true } }; } const params = { model: this.model, temperature: this.temperature, top_p: this.topP, frequency_penalty: this.frequencyPenalty, presence_penalty: this.presencePenalty, logprobs: this.logprobs, top_logprobs: this.topLogprobs, n: this.n, logit_bias: this.logitBias, stop: options?.stop ?? this.stopSequences, user: this.user, // if include_usage is set or streamUsage then stream must be set to true. stream: this.streaming, functions: options?.functions, function_call: options?.function_call, tools: options?.tools?.length ? options.tools.map((tool) => _convertChatOpenAIToolTypeToOpenAITool(tool, { strict })) : undefined, tool_choice: formatToOpenAIToolChoice(options?.tool_choice), response_format: this.createResponseFormat(options?.response_format), seed: options?.seed, ...streamOptionsConfig, parallel_tool_calls: options?.parallel_tool_calls, ...(this.audio || options?.audio ? { audio: this.audio || options?.audio } : {}), ...(this.modalities || options?.modalities ? { modalities: this.modalities || options?.modalities } : {}), ...this.modelKwargs, }; if (options?.prediction !== undefined) { params.prediction = options.prediction; } const reasoningEffort = options?.reasoning_effort ?? this.reasoningEffort; if (reasoningEffort !== undefined) { params.reasoning_effort = reasoningEffort; } if (isReasoningModel(params.model)) { params.max_completion_tokens = this.maxTokens === -1 ? undefined : this.maxTokens; } else { params.max_tokens = this.maxTokens === -1 ? undefined : this.maxTokens; } // eslint-disable-next-line @typescript-eslint/no-explicit-any return params; } _convertOpenAIChatCompletionMessageToBaseMessage(message, rawResponse) { const rawToolCalls = message.tool_calls;