@langchain/openai
OpenAI integrations for LangChain.js
1 line • 16.5 kB
Source Map (JSON)
{"version":3,"file":"responses.d.cts","names":["OpenAI","OpenAIClient","AIMessage","BaseMessage","UsageMetadata","ChatGenerationChunk","ChatOpenAIReasoningSummary","Converter","ExcludeController","T","ResponsesCreate","Responses","ResponsesParse","ResponsesCreateInvoke","ReturnType","Awaited","ResponsesParseInvoke","ResponsesInputItem","ResponseInputItem","convertResponsesUsageToUsageMetadata","ResponseUsage","convertResponsesMessageToAIMessage","convertReasoningSummaryToResponsesReasoningItem","ResponseReasoningItem","convertResponsesDeltaToChatGenerationChunk","ResponseStreamEvent","convertStandardContentMessageToResponsesInput","convertMessagesToResponsesInput"],"sources":["../../src/converters/responses.d.ts"],"sourcesContent":["import { OpenAI as OpenAIClient } from \"openai\";\nimport { AIMessage, type BaseMessage, type UsageMetadata } from \"@langchain/core/messages\";\nimport { ChatGenerationChunk } from \"@langchain/core/outputs\";\nimport { ChatOpenAIReasoningSummary } from \"../types.js\";\nimport { Converter } from \"@langchain/core/utils/format\";\ntype ExcludeController<T> = T extends {\n controller: unknown;\n} ? never : T;\nexport type ResponsesCreate = OpenAIClient.Responses[\"create\"];\nexport type ResponsesParse = OpenAIClient.Responses[\"parse\"];\nexport type ResponsesCreateInvoke = ExcludeController<Awaited<ReturnType<ResponsesCreate>>>;\nexport type ResponsesParseInvoke = ExcludeController<Awaited<ReturnType<ResponsesParse>>>;\nexport type ResponsesInputItem = OpenAIClient.Responses.ResponseInputItem;\n/**\n * Converts OpenAI Responses API usage statistics to LangChain's UsageMetadata format.\n *\n * This converter transforms token usage information from OpenAI's Responses API into\n * the standardized UsageMetadata format used throughout LangChain. 
It handles both\n * basic token counts and detailed token breakdowns including cached tokens and\n * reasoning tokens.\n *\n * @param usage - The usage statistics object from OpenAI's Responses API containing\n * token counts and optional detailed breakdowns.\n *\n * @returns A UsageMetadata object containing:\n * - `input_tokens`: Total number of tokens in the input/prompt (defaults to 0 if not provided)\n * - `output_tokens`: Total number of tokens in the model's output (defaults to 0 if not provided)\n * - `total_tokens`: Combined total of input and output tokens (defaults to 0 if not provided)\n * - `input_token_details`: Object containing detailed input token information:\n * - `cache_read`: Number of tokens read from cache (only included if available)\n * - `output_token_details`: Object containing detailed output token information:\n * - `reasoning`: Number of tokens used for reasoning (only included if available)\n *\n * @example\n * ```typescript\n * const usage = {\n * input_tokens: 100,\n * output_tokens: 50,\n * total_tokens: 150,\n * input_tokens_details: { cached_tokens: 20 },\n * output_tokens_details: { reasoning_tokens: 10 }\n * };\n *\n * const metadata = convertResponsesUsageToUsageMetadata(usage);\n * // Returns:\n * // {\n * // input_tokens: 100,\n * // output_tokens: 50,\n * // total_tokens: 150,\n * // input_token_details: { cache_read: 20 },\n * // output_token_details: { reasoning: 10 }\n * // }\n * ```\n *\n * @remarks\n * - The function safely handles undefined or null values by using optional chaining\n * and nullish coalescing operators\n * - Detailed token information (cache_read, reasoning) is only included in the result\n * if the corresponding values are present in the input\n * - Token counts default to 0 if not provided in the usage object\n * - This converter is specifically designed for OpenAI's Responses API format and\n * may differ from other OpenAI API endpoints\n */\nexport declare const convertResponsesUsageToUsageMetadata: Converter<OpenAIClient.Responses.ResponseUsage | undefined, UsageMetadata>;\n/**\n * Converts an OpenAI Responses API response to a LangChain AIMessage.\n *\n * This converter processes the output from OpenAI's Responses API (both `create` and `parse` methods)\n * and transforms it into a LangChain AIMessage object with all relevant metadata, tool calls, and content.\n *\n * @param response - The response object from OpenAI's Responses API. Can be either:\n * - ResponsesCreateInvoke: Result from `responses.create()`\n * - ResponsesParseInvoke: Result from `responses.parse()`\n *\n * @returns An AIMessage containing:\n * - `id`: The message ID from the response output\n * - `content`: Array of message content blocks (text, images, etc.)\n * - `tool_calls`: Array of successfully parsed tool calls\n * - `invalid_tool_calls`: Array of tool calls that failed to parse\n * - `usage_metadata`: Token usage information converted to LangChain format\n * - `additional_kwargs`: Extra data including:\n * - `refusal`: Refusal text if the model refused to respond\n * - `reasoning`: Reasoning output for reasoning models\n * - `tool_outputs`: Results from built-in tools (web search, file search, etc.)\n * - `parsed`: Parsed structured output when using json_schema format\n * - Function call ID mappings for tracking\n * - `response_metadata`: Metadata about the response including model, timestamps, status, etc.\n *\n * @throws Error if the response contains an error object. 
The error message and code are extracted\n * from the response.error field.\n *\n * @example\n * ```typescript\n * const response = await client.responses.create({\n * model: \"gpt-4\",\n * input: [{ type: \"message\", content: \"Hello\" }]\n * });\n * const message = convertResponsesMessageToAIMessage(response);\n * console.log(message.content); // Message content\n * console.log(message.tool_calls); // Any tool calls made\n * ```\n *\n * @remarks\n * The converter handles multiple output item types:\n * - `message`: Text and structured content from the model\n * - `function_call`: Tool/function calls that need to be executed\n * - `reasoning`: Reasoning traces from reasoning models (o1, o3, etc.)\n * - `custom_tool_call`: Custom tool invocations\n * - Built-in tool outputs: web_search, file_search, code_interpreter, etc.\n *\n * Tool calls are parsed and validated. Invalid tool calls (malformed JSON, etc.) are captured\n * in the `invalid_tool_calls` array rather than throwing errors.\n */\nexport declare const convertResponsesMessageToAIMessage: Converter<ResponsesCreateInvoke | ResponsesParseInvoke, AIMessage>;\n/**\n * Converts a LangChain ChatOpenAI reasoning summary to an OpenAI Responses API reasoning item.\n *\n * This converter transforms reasoning summaries that have been accumulated during streaming\n * (where summary parts may arrive in multiple chunks with the same index) into the final\n * consolidated format expected by OpenAI's Responses API. It combines summary parts that\n * share the same index and removes the index field from the final output.\n *\n * @param reasoning - A ChatOpenAI reasoning summary object containing:\n * - `id`: The reasoning item ID\n * - `type`: The type of reasoning (typically \"reasoning\")\n * - `summary`: Array of summary parts, each with:\n * - `text`: The summary text content\n * - `type`: The summary type (e.g., \"summary_text\")\n * - `index`: The index used to group related summary parts during streaming\n *\n * @returns An OpenAI Responses API ResponseReasoningItem with:\n * - All properties from the input reasoning object\n * - `summary`: Consolidated array of summary objects with:\n * - `text`: Combined text from all parts with the same index\n * - `type`: The summary type\n * - No `index` field (removed after consolidation)\n *\n * @example\n * ```typescript\n * // Input: Reasoning summary with multiple parts at the same index\n * const reasoning = {\n * id: \"reasoning_123\",\n * type: \"reasoning\",\n * summary: [\n * { text: \"First \", type: \"summary_text\", index: 0 },\n * { text: \"part\", type: \"summary_text\", index: 0 },\n * { text: \"Second part\", type: \"summary_text\", index: 1 }\n * ]\n * };\n *\n * const result = convertReasoningSummaryToResponsesReasoningItem(reasoning);\n * // Returns:\n * // {\n * // id: \"reasoning_123\",\n * // type: \"reasoning\",\n * // summary: [\n * // { text: \"First part\", type: \"summary_text\" },\n * // { text: \"Second part\", type: \"summary_text\" }\n * // ]\n * // }\n * ```\n *\n * @remarks\n * - This converter is primarily used when reconstructing complete reasoning items from\n * streaming chunks, where summary parts may arrive incrementally with index markers\n * - Summary parts with the same index are concatenated in the order they appear\n * - If the reasoning summary contains only one part, no reduction is performed\n * - The index field is used internally during streaming to track which summary parts\n * belong together, but is removed from the final output as it's not part of 
the\n * OpenAI Responses API schema\n * - This is the inverse operation of the streaming accumulation that happens in\n * `convertResponsesDeltaToChatGenerationChunk`\n */\nexport declare const convertReasoningSummaryToResponsesReasoningItem: Converter<ChatOpenAIReasoningSummary, OpenAIClient.Responses.ResponseReasoningItem>;\n/**\n * Converts OpenAI Responses API stream events to LangChain ChatGenerationChunk objects.\n *\n * This converter processes streaming events from OpenAI's Responses API and transforms them\n * into LangChain ChatGenerationChunk objects that can be used in streaming chat applications.\n * It handles various event types including text deltas, tool calls, reasoning, and metadata updates.\n *\n * @param event - A streaming event from OpenAI's Responses API\n *\n * @returns A ChatGenerationChunk containing:\n * - `text`: Concatenated text content from all text parts in the event\n * - `message`: An AIMessageChunk with:\n * - `id`: Message ID (set when a message output item is added)\n * - `content`: Array of content blocks (text with optional annotations)\n * - `tool_call_chunks`: Incremental tool call data (name, args, id)\n * - `usage_metadata`: Token usage information (only in completion events)\n * - `additional_kwargs`: Extra data including:\n * - `refusal`: Refusal text if the model refused to respond\n * - `reasoning`: Reasoning output for reasoning models (id, type, summary)\n * - `tool_outputs`: Results from built-in tools (web search, file search, etc.)\n * - `parsed`: Parsed structured output when using json_schema format\n * - Function call ID mappings for tracking\n * - `response_metadata`: Metadata about the response (model, id, etc.)\n * - `generationInfo`: Additional generation information (e.g., tool output status)\n *\n * Returns `null` for events that don't produce meaningful chunks:\n * - Partial image generation events (to avoid storing all partial images in history)\n * - Unrecognized event types\n *\n * @example\n * ```typescript\n * const stream = await client.responses.create({\n * model: \"gpt-4\",\n * input: [{ type: \"message\", content: \"Hello\" }],\n * stream: true\n * });\n *\n * for await (const event of stream) {\n * const chunk = convertResponsesDeltaToChatGenerationChunk(event);\n * if (chunk) {\n * console.log(chunk.text); // Incremental text\n * console.log(chunk.message.tool_call_chunks); // Tool call updates\n * }\n * }\n * ```\n *\n * @remarks\n * - Text content is accumulated in an array with index tracking for proper ordering\n * - Tool call chunks include incremental arguments that need to be concatenated by the consumer\n * - Reasoning summaries are built incrementally across multiple events\n * - Function call IDs are tracked in `additional_kwargs` to map call_id to item id\n * - The `text` field is provided for legacy compatibility with `onLLMNewToken` callbacks\n * - Usage metadata is only available in `response.completed` events\n * - Partial images are intentionally ignored to prevent memory bloat in conversation history\n */\nexport declare const convertResponsesDeltaToChatGenerationChunk: Converter<OpenAIClient.Responses.ResponseStreamEvent, ChatGenerationChunk | null>;\n/**\n * Converts a single LangChain BaseMessage to OpenAI Responses API input format.\n *\n * This converter transforms a LangChain message into one or more ResponseInputItem objects\n * that can be used with OpenAI's Responses API. 
It handles complex message structures including\n * tool calls, reasoning blocks, multimodal content, and various content block types.\n *\n * @param message - The LangChain BaseMessage to convert. Can be any message type including\n * HumanMessage, AIMessage, SystemMessage, ToolMessage, etc.\n *\n * @returns An array of ResponseInputItem objects.\n *\n * @example\n * Basic text message conversion:\n * ```typescript\n * const message = new HumanMessage(\"Hello, how are you?\");\n * const items = convertStandardContentMessageToResponsesInput(message);\n * // Returns: [{ type: \"message\", role: \"user\", content: [{ type: \"input_text\", text: \"Hello, how are you?\" }] }]\n * ```\n *\n * @example\n * AI message with tool calls:\n * ```typescript\n * const message = new AIMessage({\n * content: \"I'll check the weather for you.\",\n * tool_calls: [{\n * id: \"call_123\",\n * name: \"get_weather\",\n * args: { location: \"San Francisco\" }\n * }]\n * });\n * const items = convertStandardContentMessageToResponsesInput(message);\n * // Returns:\n * // [\n * // { type: \"message\", role: \"assistant\", content: [{ type: \"input_text\", text: \"I'll check the weather for you.\" }] },\n * // { type: \"function_call\", call_id: \"call_123\", name: \"get_weather\", arguments: '{\"location\":\"San Francisco\"}' }\n * // ]\n * ```\n */\nexport declare const convertStandardContentMessageToResponsesInput: Converter<BaseMessage, OpenAIClient.Responses.ResponseInputItem[]>;\n/**\n * - MCP (Model Context Protocol) approval responses\n * - Zero Data Retention (ZDR) mode handling\n *\n * @param params - Conversion parameters\n * @param params.messages - Array of LangChain BaseMessages to convert\n * @param params.zdrEnabled - Whether Zero Data Retention mode is enabled. When true, certain\n * metadata like message IDs and function call IDs are omitted from the output\n * @param params.model - The model name being used. Used to determine if special role mapping\n * is needed (e.g., \"system\" -> \"developer\" for reasoning models)\n *\n * @returns Array of ResponsesInputItem objects formatted for the OpenAI Responses API\n *\n * @throws {Error} When a function message is encountered (not supported)\n * @throws {Error} When computer call output format is invalid\n *\n * @example\n * ```typescript\n * const messages = [\n * new HumanMessage(\"Hello\"),\n * new AIMessage({ content: \"Hi there!\", tool_calls: [...] 
})\n * ];\n *\n * const input = convertMessagesToResponsesInput({\n * messages,\n * zdrEnabled: false,\n * model: \"gpt-4\"\n * });\n * ```\n */\nexport declare const convertMessagesToResponsesInput: Converter<{\n messages: BaseMessage[];\n zdrEnabled: boolean;\n model: string;\n}, ResponsesInputItem[]>;\nexport {};\n//# sourceMappingURL=responses.d.ts.map"],"mappings":";;;;;;;KAKKQ,uBAAuBC;;AAD6B,CAAA,GACpDD,KAAAA,GAEOC,CAFPD;AAGOE,KAAAA,eAAAA,GAAkBT,QAAAA,CAAaU,SAAS,CAAA,QAAA,CAAA;AACxCC,KAAAA,cAAAA,GAAiBX,QAAAA,CAAaU,SAAS,CAAA,OAAA,CAAA;AACvCE,KAAAA,qBAAAA,GAAwBL,iBAAH,CAAqBO,OAArB,CAA6BD,UAA7B,CAAwCJ,eAAxC,CAAA,CAAA,CAAA;AAAwCA,KAC7DM,oBAAAA,GAAuBR,iBADsCE,CACpBK,OADoBL,CACZI,UADYJ,CACDE,cADCF,CAAAA,CAAAA,CAAAA;AAAXI,KAElDG,kBAAAA,GAAqBhB,QAAAA,CAAaU,SAAAA,CAAUO,iBAFMJ;;;AAAT;AACrD;;;;;AAAoD;AACpD;AAmDA;;;;AAAoE;AAkDpE;;;;;AAAkE;AA4DlE;;;;AAA+E;AAwD/E;;;;AAA0E;AAwC1E;;;;AAA6E;AA+B7E;;;;AAA+D;;;;;;;;;;cA7O1CK,sCAAsCZ,UAAUN,QAAAA,CAAaU,SAAAA,CAAUS,2BAA2BhB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAkDlGiB,oCAAoCd,UAAUM,wBAAwBG,sBAAsBd;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cA4D5FoB,iDAAiDf,UAAUD,4BAA4BL,QAAAA,CAAaU,SAAAA,CAAUY;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAwD9GC,4CAA4CjB,UAAUN,QAAAA,CAAaU,SAAAA,CAAUc,qBAAqBpB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAwClGqB,+CAA+CnB,UAAUJ,aAAaF,QAAAA,CAAaU,SAAAA,CAAUO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cA+B7FS,iCAAiCpB;YACxCJ;;;GAGXc"}
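The declarations embedded in the source map above describe a set of converters between LangChain messages and OpenAI's Responses API. The sketch below shows how they might compose end to end, following the call patterns in the JSDoc examples; the import path and model name are assumptions for illustration, and in a typical application the ChatOpenAI model class invokes these converters internally rather than exposing them directly.

```typescript
// Illustrative sketch only. These converters live in the package's internal
// converters module; the import path below is an assumption, not a documented
// public export. Requires OPENAI_API_KEY in the environment.
import { OpenAI } from "openai";
import { HumanMessage } from "@langchain/core/messages";
import {
  convertMessagesToResponsesInput,
  convertResponsesMessageToAIMessage,
} from "@langchain/openai"; // hypothetical export path

async function main() {
  const client = new OpenAI();

  // LangChain messages -> Responses API input items.
  const input = convertMessagesToResponsesInput({
    messages: [new HumanMessage("Hello")],
    zdrEnabled: false,
    model: "gpt-4o-mini", // illustrative model name
  });

  // Call the Responses API, then convert the result back into an AIMessage.
  const response = await client.responses.create({ model: "gpt-4o-mini", input });
  const message = convertResponsesMessageToAIMessage(response);
  console.log(message.content, message.usage_metadata);
}

main();
```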