// @langchain/openai: OpenAI integrations for LangChain.js
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
__setModuleDefault(result, mod);
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChatOpenAI = exports._convertMessagesToOpenAIParams = exports.messageToOpenAIRole = void 0;
const z4 = __importStar(require("zod/v4/core"));
const openai_1 = require("openai");
const messages_1 = require("@langchain/core/messages");
const outputs_1 = require("@langchain/core/outputs");
const env_1 = require("@langchain/core/utils/env");
const chat_models_1 = require("@langchain/core/language_models/chat_models");
const base_1 = require("@langchain/core/language_models/base");
const runnables_1 = require("@langchain/core/runnables");
const output_parsers_1 = require("@langchain/core/output_parsers");
const openai_tools_1 = require("@langchain/core/output_parsers/openai_tools");
const zod_1 = require("openai/helpers/zod");
const types_1 = require("@langchain/core/utils/types");
const json_schema_1 = require("@langchain/core/utils/json_schema");
const azure_js_1 = require("./utils/azure.cjs");
const openai_js_1 = require("./utils/openai.cjs");
const openai_format_fndef_js_1 = require("./utils/openai-format-fndef.cjs");
const tools_js_1 = require("./utils/tools.cjs");
function extractGenericMessageCustomRole(message) {
if (message.role !== "system" &&
message.role !== "developer" &&
message.role !== "assistant" &&
message.role !== "user" &&
message.role !== "function" &&
message.role !== "tool") {
console.warn(`Unknown message role: ${message.role}`);
}
return message.role;
}
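/**
 * Maps a LangChain message type to the wire role expected by the OpenAI Chat
 * Completions API. A minimal sketch of the mapping (assuming messages built
 * with `@langchain/core/messages`):
 *
 * ```typescript
 * import { AIMessage, HumanMessage, SystemMessage } from "@langchain/core/messages";
 *
 * messageToOpenAIRole(new SystemMessage("You are terse.")); // "system"
 * messageToOpenAIRole(new HumanMessage("Hi"));              // "user"
 * messageToOpenAIRole(new AIMessage("Hello!"));             // "assistant"
 * ```
 */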
function messageToOpenAIRole(message) {
const type = message._getType();
switch (type) {
case "system":
return "system";
case "ai":
return "assistant";
case "human":
return "user";
case "function":
return "function";
case "tool":
return "tool";
case "generic": {
if (!messages_1.ChatMessage.isInstance(message))
throw new Error("Invalid generic chat message");
return extractGenericMessageCustomRole(message);
}
default:
throw new Error(`Unknown message type: ${type}`);
}
}
exports.messageToOpenAIRole = messageToOpenAIRole;
const completionsApiContentBlockConverter = {
providerName: "ChatOpenAI",
fromStandardTextBlock(block) {
return { type: "text", text: block.text };
},
fromStandardImageBlock(block) {
if (block.source_type === "url") {
return {
type: "image_url",
image_url: {
url: block.url,
...(block.metadata?.detail
? { detail: block.metadata.detail }
: {}),
},
};
}
if (block.source_type === "base64") {
const url = `data:${block.mime_type ?? ""};base64,${block.data}`;
return {
type: "image_url",
image_url: {
url,
...(block.metadata?.detail
? { detail: block.metadata.detail }
: {}),
},
};
}
throw new Error(`Image content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
},
fromStandardAudioBlock(block) {
if (block.source_type === "url") {
const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
if (!data) {
throw new Error(`URL audio blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`);
}
const rawMimeType = data.mime_type || block.mime_type || "";
let mimeType;
try {
mimeType = (0, messages_1.parseMimeType)(rawMimeType);
}
catch {
throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
}
if (mimeType.type !== "audio" ||
(mimeType.subtype !== "wav" && mimeType.subtype !== "mp3")) {
throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
}
return {
type: "input_audio",
input_audio: {
format: mimeType.subtype,
data: data.data,
},
};
}
if (block.source_type === "base64") {
let mimeType;
try {
mimeType = (0, messages_1.parseMimeType)(block.mime_type ?? "");
}
catch {
throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
}
if (mimeType.type !== "audio" ||
(mimeType.subtype !== "wav" && mimeType.subtype !== "mp3")) {
throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
}
return {
type: "input_audio",
input_audio: {
format: mimeType.subtype,
data: block.data,
},
};
}
throw new Error(`Audio content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
},
fromStandardFileBlock(block) {
if (block.source_type === "url") {
const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
if (!data) {
throw new Error(`URL file blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`);
}
return {
type: "file",
file: {
file_data: block.url,
...(block.metadata?.filename || block.metadata?.name
? {
filename: (block.metadata?.filename ||
block.metadata?.name),
}
: {}),
},
};
}
if (block.source_type === "base64") {
return {
type: "file",
file: {
file_data: `data:${block.mime_type ?? ""};base64,${block.data}`,
...(block.metadata?.filename ||
block.metadata?.name ||
block.metadata?.title
? {
filename: (block.metadata?.filename ||
block.metadata?.name ||
block.metadata?.title),
}
: {}),
},
};
}
if (block.source_type === "id") {
return {
type: "file",
file: {
file_id: block.id,
},
};
}
throw new Error(`File content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
},
};
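/*
 * A sketch of what the converter above produces for a standard base64 image
 * block (the base64 payload below is illustrative):
 *
 * ```typescript
 * completionsApiContentBlockConverter.fromStandardImageBlock({
 *   type: "image",
 *   source_type: "base64",
 *   mime_type: "image/png",
 *   data: "iVBORw0KG...", // hypothetical, truncated
 *   metadata: { detail: "low" },
 * });
 * // => { type: "image_url",
 * //      image_url: { url: "data:image/png;base64,iVBORw0KG...", detail: "low" } }
 * ```
 */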
// Used in LangSmith; keeping this export is important
function _convertMessagesToOpenAIParams(messages, model) {
// TODO: Function messages do not support array content, fix cast
return messages.flatMap((message) => {
let role = messageToOpenAIRole(message);
if (role === "system" && isReasoningModel(model)) {
role = "developer";
}
const content = typeof message.content === "string"
? message.content
: message.content.map((m) => {
if ((0, messages_1.isDataContentBlock)(m)) {
return (0, messages_1.convertToProviderContentBlock)(m, completionsApiContentBlockConverter);
}
return m;
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const completionParam = {
role,
content,
};
if (message.name != null) {
completionParam.name = message.name;
}
if (message.additional_kwargs.function_call != null) {
completionParam.function_call = message.additional_kwargs.function_call;
completionParam.content = "";
}
if ((0, messages_1.isAIMessage)(message) && !!message.tool_calls?.length) {
completionParam.tool_calls = message.tool_calls.map(openai_tools_1.convertLangChainToolCallToOpenAI);
completionParam.content = "";
}
else {
if (message.additional_kwargs.tool_calls != null) {
completionParam.tool_calls = message.additional_kwargs.tool_calls;
}
if (message.tool_call_id != null) {
completionParam.tool_call_id = message.tool_call_id;
}
}
if (message.additional_kwargs.audio &&
typeof message.additional_kwargs.audio === "object" &&
"id" in message.additional_kwargs.audio) {
const audioMessage = {
role: "assistant",
audio: {
id: message.additional_kwargs.audio.id,
},
};
return [completionParam, audioMessage];
}
return completionParam;
});
}
exports._convertMessagesToOpenAIParams = _convertMessagesToOpenAIParams;
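/*
 * Usage sketch for the converter above (internal API; `model` only affects
 * whether "system" is rewritten to "developer" for reasoning models):
 *
 * ```typescript
 * import { HumanMessage, SystemMessage } from "@langchain/core/messages";
 *
 * _convertMessagesToOpenAIParams(
 *   [new SystemMessage("Be brief."), new HumanMessage("Hello")],
 *   "gpt-4o"
 * );
 * // => [ { role: "system", content: "Be brief." },
 * //      { role: "user", content: "Hello" } ]
 * ```
 */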
const _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__";
function _convertReasoningSummaryToOpenAIResponsesParams(reasoning) {
    // combine summary parts that have the same index and then remove the indexes
    // (the reduce is seeded with a copy of the first part, so iterate from the second
    // to avoid concatenating the first part's text twice)
    const summary = (reasoning.summary.length > 1
        ? reasoning.summary.slice(1).reduce((acc, curr) => {
const last = acc.at(-1);
if (last.index === curr.index) {
last.text += curr.text;
}
else {
acc.push(curr);
}
return acc;
}, [{ ...reasoning.summary[0] }])
: reasoning.summary).map((s) => Object.fromEntries(Object.entries(s).filter(([k]) => k !== "index")));
return {
...reasoning,
summary,
};
}
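/*
 * Sketch of the merge above: summary parts sharing an `index` are concatenated
 * and the bookkeeping `index` field is then dropped (ids illustrative):
 *
 * ```typescript
 * _convertReasoningSummaryToOpenAIResponsesParams({
 *   type: "reasoning",
 *   id: "rs_123", // hypothetical
 *   summary: [
 *     { type: "summary_text", text: "First ", index: 0 },
 *     { type: "summary_text", text: "thought.", index: 0 },
 *     { type: "summary_text", text: "Second thought.", index: 1 },
 *   ],
 * });
 * // => { type: "reasoning", id: "rs_123", summary: [
 * //      { type: "summary_text", text: "First thought." },
 * //      { type: "summary_text", text: "Second thought." } ] }
 * ```
 */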
function _convertMessagesToOpenAIResponsesParams(messages, model, zdrEnabled) {
const lastAIMessage = messages.filter((m) => (0, messages_1.isAIMessage)(m)).pop();
const lastAIMessageId = lastAIMessage?.response_metadata?.id;
const newMessages = lastAIMessageId && lastAIMessageId.startsWith("resp_") && !zdrEnabled
? messages.slice(messages.indexOf(lastAIMessage) + 1)
: messages;
return newMessages.flatMap((lcMsg) => {
let role = messageToOpenAIRole(lcMsg);
if (role === "system" && isReasoningModel(model))
role = "developer";
if (role === "function") {
throw new Error("Function messages are not supported in Responses API");
}
if (role === "tool") {
const toolMessage = lcMsg;
// Handle computer call output
if (toolMessage.additional_kwargs?.type === "computer_call_output") {
const output = (() => {
if (typeof toolMessage.content === "string") {
return {
type: "computer_screenshot",
image_url: toolMessage.content,
};
}
if (Array.isArray(toolMessage.content)) {
const oaiScreenshot = toolMessage.content.find((i) => i.type === "computer_screenshot");
if (oaiScreenshot)
return oaiScreenshot;
const lcImage = toolMessage.content.find((i) => i.type === "image_url");
if (lcImage) {
return {
type: "computer_screenshot",
image_url: typeof lcImage.image_url === "string"
? lcImage.image_url
: lcImage.image_url.url,
};
}
}
throw new Error("Invalid computer call output");
})();
return {
type: "computer_call_output",
output,
call_id: toolMessage.tool_call_id,
};
}
return {
type: "function_call_output",
call_id: toolMessage.tool_call_id,
id: toolMessage.id?.startsWith("fc_") ? toolMessage.id : undefined,
output: typeof toolMessage.content !== "string"
? JSON.stringify(toolMessage.content)
: toolMessage.content,
};
}
if (role === "assistant") {
// if we have the original response items, just reuse them
if (!zdrEnabled &&
lcMsg.response_metadata.output != null &&
Array.isArray(lcMsg.response_metadata.output) &&
lcMsg.response_metadata.output.length > 0 &&
lcMsg.response_metadata.output.every((item) => "type" in item)) {
return lcMsg.response_metadata.output;
}
// otherwise, try to reconstruct the response from what we have
const input = [];
// reasoning items
if (!zdrEnabled && lcMsg.additional_kwargs.reasoning != null) {
const isReasoningItem = (item) => typeof item === "object" &&
item != null &&
"type" in item &&
item.type === "reasoning";
if (isReasoningItem(lcMsg.additional_kwargs.reasoning)) {
const reasoningItem = _convertReasoningSummaryToOpenAIResponsesParams(lcMsg.additional_kwargs.reasoning);
input.push(reasoningItem);
}
}
// ai content
let { content } = lcMsg;
if (lcMsg.additional_kwargs.refusal != null) {
if (typeof content === "string") {
content = [{ type: "output_text", text: content, annotations: [] }];
}
content = [
...content,
{ type: "refusal", refusal: lcMsg.additional_kwargs.refusal },
];
}
input.push({
type: "message",
role: "assistant",
...(lcMsg.id && !zdrEnabled ? { id: lcMsg.id } : {}),
content: typeof content === "string"
? content
: content.flatMap((item) => {
if (item.type === "text") {
return {
type: "output_text",
text: item.text,
// @ts-expect-error TODO: add types for `annotations`
annotations: item.annotations ?? [],
};
}
if (item.type === "output_text" || item.type === "refusal") {
return item;
}
return [];
}),
});
// function tool calls and computer use tool calls
const functionCallIds =
// eslint-disable-next-line @typescript-eslint/no-use-before-define
lcMsg.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY];
if ((0, messages_1.isAIMessage)(lcMsg) && !!lcMsg.tool_calls?.length) {
input.push(...lcMsg.tool_calls.map((toolCall) => ({
type: "function_call",
name: toolCall.name,
arguments: JSON.stringify(toolCall.args),
call_id: toolCall.id,
...(zdrEnabled ? { id: functionCallIds?.[toolCall.id] } : {}),
})));
}
else if (lcMsg.additional_kwargs.tool_calls != null) {
input.push(...lcMsg.additional_kwargs.tool_calls.map((toolCall) => ({
type: "function_call",
name: toolCall.function.name,
call_id: toolCall.id,
...(zdrEnabled ? { id: functionCallIds?.[toolCall.id] } : {}),
arguments: toolCall.function.arguments,
})));
}
const toolOutputs = lcMsg.response_metadata.output?.length
? lcMsg.response_metadata.output
: lcMsg.additional_kwargs.tool_outputs;
let computerCalls = [];
if (toolOutputs != null) {
const castToolOutputs = toolOutputs;
computerCalls = castToolOutputs?.filter((item) => item.type === "computer_call");
if (computerCalls.length > 0)
input.push(...computerCalls);
}
return input;
}
const content = typeof lcMsg.content === "string"
? lcMsg.content
: lcMsg.content.flatMap((item) => {
if ((0, messages_1.isDataContentBlock)(item)) {
return (0, messages_1.convertToProviderContentBlock)(item, completionsApiContentBlockConverter);
}
if (item.type === "text") {
return { type: "input_text", text: item.text };
}
if (item.type === "image_url") {
const image_url = typeof item.image_url === "string"
? item.image_url
: item.image_url.url;
const detail = typeof item.image_url === "string"
? "auto"
: item.image_url.detail;
return { type: "input_image", image_url, detail };
}
if (item.type === "input_text" ||
item.type === "input_image" ||
item.type === "input_file") {
return item;
}
return [];
});
if (role === "user" || role === "system" || role === "developer") {
return { type: "message", role, content };
}
console.warn(`Unsupported role found when converting to OpenAI Responses API: ${role}`);
return [];
});
}
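/*
 * Sketch: with the Responses API, a LangChain ToolMessage carrying a plain
 * string result becomes a `function_call_output` item:
 *
 * ```typescript
 * import { ToolMessage } from "@langchain/core/messages";
 *
 * _convertMessagesToOpenAIResponsesParams(
 *   [new ToolMessage({ content: "72°F", tool_call_id: "call_abc" })],
 *   "gpt-4o",
 *   false
 * );
 * // => [ { type: "function_call_output", call_id: "call_abc",
 * //        id: undefined, output: "72°F" } ]
 * ```
 */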
function _convertOpenAIResponsesMessageToBaseMessage(response) {
if (response.error) {
// TODO: add support for `addLangChainErrorFields`
const error = new Error(response.error.message);
error.name = response.error.code;
throw error;
}
let messageId;
const content = [];
const tool_calls = [];
const invalid_tool_calls = [];
const response_metadata = {
model: response.model,
created_at: response.created_at,
id: response.id,
incomplete_details: response.incomplete_details,
metadata: response.metadata,
object: response.object,
status: response.status,
user: response.user,
// for compatibility with chat completion calls.
model_name: response.model,
};
const additional_kwargs = {};
for (const item of response.output) {
if (item.type === "message") {
messageId = item.id;
content.push(...item.content.flatMap((part) => {
if (part.type === "output_text") {
if ("parsed" in part && part.parsed != null) {
additional_kwargs.parsed = part.parsed;
}
return {
type: "text",
text: part.text,
annotations: part.annotations,
};
}
if (part.type === "refusal") {
additional_kwargs.refusal = part.refusal;
return [];
}
return part;
}));
}
else if (item.type === "function_call") {
const fnAdapter = {
function: { name: item.name, arguments: item.arguments },
id: item.call_id,
};
try {
tool_calls.push((0, openai_tools_1.parseToolCall)(fnAdapter, { returnId: true }));
}
catch (e) {
let errMessage;
if (typeof e === "object" &&
e != null &&
"message" in e &&
typeof e.message === "string") {
errMessage = e.message;
}
invalid_tool_calls.push((0, openai_tools_1.makeInvalidToolCall)(fnAdapter, errMessage));
}
additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] ??= {};
if (item.id) {
additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][item.call_id] = item.id;
}
}
else if (item.type === "reasoning") {
additional_kwargs.reasoning = item;
}
else {
additional_kwargs.tool_outputs ??= [];
additional_kwargs.tool_outputs.push(item);
}
}
return new messages_1.AIMessage({
id: messageId,
content,
tool_calls,
invalid_tool_calls,
usage_metadata: response.usage,
additional_kwargs,
response_metadata,
});
}
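/*
 * Sketch: a `function_call` item in a Responses API payload surfaces as a
 * parsed tool call on the resulting AIMessage (the response below is trimmed
 * to the relevant fields; real payloads carry more):
 *
 * ```typescript
 * const msg = _convertOpenAIResponsesMessageToBaseMessage({
 *   id: "resp_123", // hypothetical
 *   model: "o3",
 *   output: [
 *     { type: "function_call", id: "fc_1", call_id: "call_1",
 *       name: "getWeather", arguments: '{"city":"Paris"}' },
 *   ],
 * } as any);
 * // msg.tool_calls =>
 * //   [ { name: "getWeather", args: { city: "Paris" }, id: "call_1", type: "tool_call" } ]
 * ```
 */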
function _convertOpenAIResponsesDeltaToBaseMessageChunk(chunk) {
const content = [];
let generationInfo = {};
let usage_metadata;
const tool_call_chunks = [];
const response_metadata = {};
const additional_kwargs = {};
let id;
if (chunk.type === "response.output_text.delta") {
content.push({
type: "text",
text: chunk.delta,
index: chunk.content_index,
});
}
else if (chunk.type === "response.output_text.annotation.added") {
content.push({
type: "text",
text: "",
annotations: [chunk.annotation],
index: chunk.content_index,
});
}
else if (chunk.type === "response.output_item.added" &&
chunk.item.type === "message") {
id = chunk.item.id;
}
else if (chunk.type === "response.output_item.added" &&
chunk.item.type === "function_call") {
tool_call_chunks.push({
type: "tool_call_chunk",
name: chunk.item.name,
args: chunk.item.arguments,
id: chunk.item.call_id,
index: chunk.output_index,
});
additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {
[chunk.item.call_id]: chunk.item.id,
};
}
else if (chunk.type === "response.output_item.done" &&
(chunk.item.type === "web_search_call" ||
chunk.item.type === "file_search_call" ||
chunk.item.type === "computer_call")) {
additional_kwargs.tool_outputs = [chunk.item];
}
else if (chunk.type === "response.created") {
response_metadata.id = chunk.response.id;
response_metadata.model_name = chunk.response.model;
response_metadata.model = chunk.response.model;
}
else if (chunk.type === "response.completed") {
const msg = _convertOpenAIResponsesMessageToBaseMessage(chunk.response);
usage_metadata = chunk.response.usage;
if (chunk.response.text?.format?.type === "json_schema") {
additional_kwargs.parsed ??= JSON.parse(msg.text);
}
for (const [key, value] of Object.entries(chunk.response)) {
if (key !== "id")
response_metadata[key] = value;
}
}
else if (chunk.type === "response.function_call_arguments.delta") {
tool_call_chunks.push({
type: "tool_call_chunk",
args: chunk.delta,
index: chunk.output_index,
});
}
else if (chunk.type === "response.web_search_call.completed" ||
chunk.type === "response.file_search_call.completed") {
generationInfo = {
tool_outputs: {
id: chunk.item_id,
type: chunk.type.replace("response.", "").replace(".completed", ""),
status: "completed",
},
};
}
else if (chunk.type === "response.refusal.done") {
additional_kwargs.refusal = chunk.refusal;
}
else if (chunk.type === "response.output_item.added" &&
"item" in chunk &&
chunk.item.type === "reasoning") {
const summary = chunk
.item.summary
? chunk.item.summary.map((s, index) => ({
...s,
index,
}))
: undefined;
additional_kwargs.reasoning = {
            // We only capture the ID in the first chunk; otherwise the concatenated result of
            // all chunks would repeat the ID once per chunk. (The `type` field has special
            // concat handling, so repeating it there is safe.)
id: chunk.item.id,
type: chunk.item.type,
...(summary ? { summary } : {}),
};
}
else if (chunk.type === "response.reasoning_summary_part.added") {
additional_kwargs.reasoning = {
type: "reasoning",
summary: [{ ...chunk.part, index: chunk.summary_index }],
};
}
else if (chunk.type === "response.reasoning_summary_text.delta") {
additional_kwargs.reasoning = {
type: "reasoning",
summary: [
{ text: chunk.delta, type: "summary_text", index: chunk.summary_index },
],
};
}
else {
return null;
}
return new outputs_1.ChatGenerationChunk({
        // For legacy reasons; `onLLMNewToken` pulls this out
text: content.map((part) => part.text).join(""),
message: new messages_1.AIMessageChunk({
id,
content,
tool_call_chunks,
usage_metadata,
additional_kwargs,
response_metadata,
}),
generationInfo,
});
}
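/*
 * Sketch: a streamed `response.output_text.delta` event maps to a generation
 * chunk whose message carries an indexed text part (event trimmed to the
 * fields read above; empty fields omitted from the shown result):
 *
 * ```typescript
 * _convertOpenAIResponsesDeltaToBaseMessageChunk({
 *   type: "response.output_text.delta",
 *   delta: "Hel",
 *   content_index: 0,
 * } as any);
 * // => ChatGenerationChunk {
 * //      text: "Hel",
 * //      message: AIMessageChunk { content: [{ type: "text", text: "Hel", index: 0 }] }
 * //    }
 * ```
 */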
function isBuiltInTool(tool) {
return "type" in tool && tool.type !== "function";
}
function isBuiltInToolChoice(tool_choice) {
return (tool_choice != null &&
typeof tool_choice === "object" &&
"type" in tool_choice &&
tool_choice.type !== "function");
}
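/*
 * Anything whose `type` is not "function" counts as an OpenAI-hosted built-in
 * tool. Illustrative shapes:
 *
 * ```typescript
 * isBuiltInTool({ type: "web_search_preview" }); // true
 * isBuiltInTool({ type: "function", function: { name: "getWeather" } }); // false
 * ```
 */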
function _convertChatOpenAIToolTypeToOpenAITool(tool, fields) {
if ((0, base_1.isOpenAITool)(tool)) {
if (fields?.strict !== undefined) {
return {
...tool,
function: {
...tool.function,
strict: fields.strict,
},
};
}
return tool;
}
return (0, tools_js_1._convertToOpenAITool)(tool, fields);
}
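/*
 * Sketch: a `strict` flag, when provided, is copied onto an already-formatted
 * OpenAI tool; other tool shapes are delegated to `_convertToOpenAITool`:
 *
 * ```typescript
 * _convertChatOpenAIToolTypeToOpenAITool(
 *   { type: "function", function: { name: "getWeather", parameters: {} } },
 *   { strict: true }
 * );
 * // => { type: "function",
 * //      function: { name: "getWeather", parameters: {}, strict: true } }
 * ```
 */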
function isReasoningModel(model) {
return model && /^o\d/.test(model);
}
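/*
 * The heuristic above treats model names starting with "o" followed by a
 * digit as reasoning models:
 *
 * ```typescript
 * isReasoningModel("o1-mini"); // true
 * isReasoningModel("o3");      // true
 * isReasoningModel("gpt-4o");  // false (the "o" is not at the start)
 * ```
 */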
/**
* OpenAI chat model integration.
*
* To use with Azure, import the `AzureChatOpenAI` class.
*
* Setup:
* Install `@langchain/openai` and set an environment variable named `OPENAI_API_KEY`.
*
* ```bash
* npm install @langchain/openai
* export OPENAI_API_KEY="your-api-key"
* ```
*
* ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html#constructor)
*
* ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html)
*
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
 * They can also be passed via `.withConfig`, or as the second argument to `.bindTools`, as shown in the examples below:
*
* ```typescript
* // When calling `.withConfig`, call options should be passed via the first argument
* const llmWithArgsBound = llm.withConfig({
* stop: ["\n"],
* tools: [...],
* });
*
* // When calling `.bindTools`, call options should be passed via the second argument
* const llmWithTools = llm.bindTools(
* [...],
* {
* tool_choice: "auto",
* }
* );
* ```
*
* ## Examples
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
* ```typescript
* import { ChatOpenAI } from '@langchain/openai';
*
* const llm = new ChatOpenAI({
* model: "gpt-4o",
* temperature: 0,
* maxTokens: undefined,
* timeout: undefined,
* maxRetries: 2,
* // apiKey: "...",
* // baseUrl: "...",
* // organization: "...",
* // other params...
* });
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Invoking</strong></summary>
*
* ```typescript
* const input = `Translate "I love programming" into French.`;
*
* // Models also accept a list of chat messages or a formatted prompt
* const result = await llm.invoke(input);
* console.log(result);
* ```
*
* ```txt
* AIMessage {
* "id": "chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz",
* "content": "J'adore la programmation.",
* "response_metadata": {
* "tokenUsage": {
* "completionTokens": 5,
* "promptTokens": 28,
* "totalTokens": 33
* },
* "finish_reason": "stop",
* "system_fingerprint": "fp_3aa7262c27"
* },
* "usage_metadata": {
* "input_tokens": 28,
* "output_tokens": 5,
* "total_tokens": 33
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Streaming Chunks</strong></summary>
*
* ```typescript
* for await (const chunk of await llm.stream(input)) {
* console.log(chunk);
* }
* ```
*
* ```txt
* AIMessageChunk {
* "id": "chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs",
* "content": ""
* }
* AIMessageChunk {
* "content": "J"
* }
* AIMessageChunk {
* "content": "'adore"
* }
* AIMessageChunk {
* "content": " la"
* }
* AIMessageChunk {
* "content": " programmation",,
* }
* AIMessageChunk {
* "content": ".",,
* }
* AIMessageChunk {
* "content": "",
* "response_metadata": {
* "finish_reason": "stop",
* "system_fingerprint": "fp_c9aa9c0491"
* },
* }
* AIMessageChunk {
* "content": "",
* "usage_metadata": {
* "input_tokens": 28,
* "output_tokens": 5,
* "total_tokens": 33
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Aggregate Streamed Chunks</strong></summary>
*
* ```typescript
* import { AIMessageChunk } from '@langchain/core/messages';
* import { concat } from '@langchain/core/utils/stream';
*
* const stream = await llm.stream(input);
* let full: AIMessageChunk | undefined;
* for await (const chunk of stream) {
* full = !full ? chunk : concat(full, chunk);
* }
* console.log(full);
* ```
*
* ```txt
* AIMessageChunk {
* "id": "chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu",
* "content": "J'adore la programmation.",
* "response_metadata": {
* "prompt": 0,
* "completion": 0,
* "finish_reason": "stop",
* },
* "usage_metadata": {
* "input_tokens": 28,
* "output_tokens": 5,
* "total_tokens": 33
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Bind tools</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const GetWeather = {
* name: "GetWeather",
* description: "Get the current weather in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const GetPopulation = {
* name: "GetPopulation",
* description: "Get the current population in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const llmWithTools = llm.bindTools(
* [GetWeather, GetPopulation],
* {
 *     // strict: true // enforce that tool call args respect the schema
* }
* );
* const aiMsg = await llmWithTools.invoke(
* "Which city is hotter today and which is bigger: LA or NY?"
* );
* console.log(aiMsg.tool_calls);
* ```
*
* ```txt
* [
* {
* name: 'GetWeather',
* args: { location: 'Los Angeles, CA' },
* type: 'tool_call',
* id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK'
* },
* {
* name: 'GetWeather',
* args: { location: 'New York, NY' },
* type: 'tool_call',
* id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX'
* },
* {
* name: 'GetPopulation',
* args: { location: 'Los Angeles, CA' },
* type: 'tool_call',
* id: 'call_kL3OXxaq9OjIKqRTpvjaCH14'
* },
* {
* name: 'GetPopulation',
* args: { location: 'New York, NY' },
* type: 'tool_call',
* id: 'call_s9KQB1UWj45LLGaEnjz0179q'
* }
* ]
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Structured Output</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const Joke = z.object({
* setup: z.string().describe("The setup of the joke"),
* punchline: z.string().describe("The punchline to the joke"),
* rating: z.number().nullable().describe("How funny the joke is, from 1 to 10")
* }).describe('Joke to tell user.');
*
* const structuredLlm = llm.withStructuredOutput(Joke, {
* name: "Joke",
* strict: true, // Optionally enable OpenAI structured outputs
* });
* const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
* console.log(jokeResult);
* ```
*
* ```txt
* {
* setup: 'Why was the cat sitting on the computer?',
* punchline: 'Because it wanted to keep an eye on the mouse!',
* rating: 7
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>JSON Object Response Format</strong></summary>
*
* ```typescript
* const jsonLlm = llm.withConfig({ response_format: { type: "json_object" } });
* const jsonLlmAiMsg = await jsonLlm.invoke(
* "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]"
* );
* console.log(jsonLlmAiMsg.content);
* ```
*
* ```txt
* {
* "randomInts": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Multimodal</strong></summary>
*
* ```typescript
* import { HumanMessage } from '@langchain/core/messages';
*
* const imageUrl = "https://example.com/image.jpg";
* const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
* const base64Image = Buffer.from(imageData).toString('base64');
*
* const message = new HumanMessage({
* content: [
* { type: "text", text: "describe the weather in this image" },
* {
* type: "image_url",
* image_url: { url: `data:image/jpeg;base64,${base64Image}` },
* },
* ]
* });
*
* const imageDescriptionAiMsg = await llm.invoke([message]);
* console.log(imageDescriptionAiMsg.content);
* ```
*
* ```txt
* The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions.
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Usage Metadata</strong></summary>
*
* ```typescript
* const aiMsgForMetadata = await llm.invoke(input);
* console.log(aiMsgForMetadata.usage_metadata);
* ```
*
* ```txt
* { input_tokens: 28, output_tokens: 5, total_tokens: 33 }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Logprobs</strong></summary>
*
* ```typescript
* const logprobsLlm = new ChatOpenAI({ logprobs: true });
* const aiMsgForLogprobs = await logprobsLlm.invoke(input);
* console.log(aiMsgForLogprobs.response_metadata.logprobs);
* ```
*
* ```txt
* {
* content: [
* {
* token: 'J',
* logprob: -0.000050616763,
* bytes: [Array],
* top_logprobs: []
* },
* {
* token: "'",
* logprob: -0.01868736,
* bytes: [Array],
* top_logprobs: []
* },
* {
* token: 'ad',
* logprob: -0.0000030545007,
* bytes: [Array],
* top_logprobs: []
* },
* { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] },
* {
* token: ' la',
* logprob: -0.515404,
* bytes: [Array],
* top_logprobs: []
* },
* {
* token: ' programm',
* logprob: -0.0000118755715,
* bytes: [Array],
* top_logprobs: []
* },
* { token: 'ation', logprob: 0, bytes: [Array], top_logprobs: [] },
* {
* token: '.',
* logprob: -0.0000037697225,
* bytes: [Array],
* top_logprobs: []
* }
* ],
* refusal: null
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Response Metadata</strong></summary>
*
* ```typescript
* const aiMsgForResponseMetadata = await llm.invoke(input);
* console.log(aiMsgForResponseMetadata.response_metadata);
* ```
*
* ```txt
* {
* tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 },
* finish_reason: 'stop',
* system_fingerprint: 'fp_3aa7262c27'
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>JSON Schema Structured Output</strong></summary>
*
* ```typescript
* const llmForJsonSchema = new ChatOpenAI({
* model: "gpt-4o-2024-08-06",
* }).withStructuredOutput(
* z.object({
* command: z.string().describe("The command to execute"),
* expectedOutput: z.string().describe("The expected output of the command"),
* options: z
* .array(z.string())
* .describe("The options you can pass to the command"),
* }),
* {
* method: "jsonSchema",
* strict: true, // Optional when using the `jsonSchema` method
* }
* );
*
* const jsonSchemaRes = await llmForJsonSchema.invoke(
* "What is the command to list files in a directory?"
* );
* console.log(jsonSchemaRes);
* ```
*
* ```txt
* {
* command: 'ls',
* expectedOutput: 'A list of files and subdirectories within the specified directory.',
* options: [
* '-a: include directory entries whose names begin with a dot (.).',
* '-l: use a long listing format.',
* '-h: with -l, print sizes in human readable format (e.g., 1K, 234M, 2G).',
* '-t: sort by time, newest first.',
* '-r: reverse order while sorting.',
* '-S: sort by file size, largest first.',
* '-R: list subdirectories recursively.'
* ]
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Audio Outputs</strong></summary>
*
* ```typescript
* import { ChatOpenAI } from "@langchain/openai";
*
* const modelWithAudioOutput = new ChatOpenAI({
* model: "gpt-4o-audio-preview",
* // You may also pass these fields to `.withConfig` as a call argument.
* modalities: ["text", "audio"], // Specifies that the model should output audio.
* audio: {
* voice: "alloy",
* format: "wav",
* },
* });
*
* const audioOutputResult = await modelWithAudioOutput.invoke("Tell me a joke about cats.");
* const castAudioContent = audioOutputResult.additional_kwargs.audio as Record<string, any>;
*
* console.log({
* ...castAudioContent,
* data: castAudioContent.data.slice(0, 100) // Sliced for brevity
* })
* ```
*
* ```txt
* {
* id: 'audio_67117718c6008190a3afad3e3054b9b6',
* data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg',
* expires_at: 1729201448,
* transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!'
* }
* ```
* </details>
*
* <br />
*/
class ChatOpenAI extends chat_models_1.BaseChatModel {
static lc_name() {
return "ChatOpenAI";
}
get callKeys() {
return [
...super.callKeys,
"options",
"function_call",
"functions",
"tools",
"tool_choice",
"promptIndex",
"response_format",
"seed",
"reasoning_effort",
];
}
get lc_secrets() {
return {
openAIApiKey: "OPENAI_API_KEY",
apiKey: "OPENAI_API_KEY",
organization: "OPENAI_ORGANIZATION",
};
}
get lc_aliases() {
return {
modelName: "model",
openAIApiKey: "openai_api_key",
apiKey: "openai_api_key",
};
}
get lc_serializable_keys() {
return [
"configuration",
"logprobs",
"topLogprobs",
"prefixMessages",
"supportsStrictToolCalling",
"modalities",
"audio",
"reasoningEffort",
"temperature",
"maxTokens",
"topP",
"frequencyPenalty",
"presencePenalty",
"n",
"logitBias",
"user",
"streaming",
"streamUsage",
"modelName",
"model",
"modelKwargs",
"stop",
"stopSequences",
"timeout",
"openAIApiKey",
"apiKey",
"cache",
"maxConcurrency",
"maxRetries",
"verbose",
"callbacks",
"tags",
"metadata",
"disableStreaming",
"useResponsesApi",
"zdrEnabled",
"reasoning",
];
}
constructor(fields) {
super(fields ?? {});
Object.defineProperty(this, "lc_serializable", {
enumerable: true,
configurable: true,
writable: true,
value: true
});
Object.defineProperty(this, "temperature", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "topP", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "frequencyPenalty", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "presencePenalty", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "n", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "logitBias", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
/** @deprecated Use "model" instead */
Object.defineProperty(this, "modelName", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "model", {
enumerable: true,
configurable: true,
writable: true,
value: "gpt-3.5-turbo"
});
Object.defineProperty(this, "modelKwargs", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "stop", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "stopSequences", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "user", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "timeout", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "streaming", {
enumerable: true,
configurable: true,
writable: true,
value: false
});
Object.defineProperty(this, "streamUsage", {
enumerable: true,
configurable: true,
writable: true,
value: true
});
Object.defineProperty(this, "maxTokens", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "logprobs", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "topLogprobs", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "openAIApiKey", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "apiKey", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "organization", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "__includeRawResponse", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "client", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "clientConfig", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
/**
* Whether the model supports the `strict` argument when passing in tools.
* If `undefined` the `strict` argument will not be passed to OpenAI.
*/
Object.defineProperty(this, "supportsStrictToolCalling", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "audio", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "modalities", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
/**
* @deprecated Use {@link reasoning} object instead.
*/
Object.defineProperty(this, "reasoningEffort", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
/**
* Options for reasoning models.
*/
Object.defineProperty(this, "reasoning", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
/**
* Whether to use the responses API for all requests. If `false` the responses API will be used
* only when required in order to fulfill the request.
*/
Object.defineProperty(this, "useResponsesApi", {
enumerable: