UNPKG

@langchain/openai

{"version":3,"file":"completions.d.cts","names":["BaseMessage","BaseMessageChunk","StandardContentBlockConverter","ContentBlock","Converter","ChatCompletionContentPartText","ChatCompletionContentPartImage","ChatCompletionContentPartInputAudio","ChatCompletionContentPart","OpenAI","OpenAIClient","completionsApiContentBlockConverter","File","convertCompletionsMessageToBaseMessage","Chat","Completions","ChatCompletionMessage","ChatCompletion","convertCompletionsDeltaToBaseMessageChunk","Record","ChatCompletionChunk","ChatCompletionRole","convertStandardContentBlockToCompletionsContentPart","Standard","convertStandardContentMessageToCompletionsMessage","ChatCompletionMessageParam","convertMessagesToCompletionsMessageParams"],"sources":["../../src/converters/completions.d.ts"],"sourcesContent":["import { BaseMessage, BaseMessageChunk, StandardContentBlockConverter, ContentBlock } from \"@langchain/core/messages\";\nimport { Converter } from \"@langchain/core/utils/format\";\nimport type { ChatCompletionContentPartText, ChatCompletionContentPartImage, ChatCompletionContentPartInputAudio, ChatCompletionContentPart } from \"openai/resources/chat/completions\";\nimport { OpenAI as OpenAIClient } from \"openai\";\n/**\n * @deprecated This converter is an internal detail of the OpenAI provider. Do not use it directly. This will be revisited in a future release.\n */\nexport declare const completionsApiContentBlockConverter: StandardContentBlockConverter<{\n text: ChatCompletionContentPartText;\n image: ChatCompletionContentPartImage;\n audio: ChatCompletionContentPartInputAudio;\n file: ChatCompletionContentPart.File;\n}>;\n/**\n * Converts an OpenAI Chat Completions API message to a LangChain BaseMessage.\n *\n * This converter transforms messages from OpenAI's Chat Completions API format into\n * LangChain's internal message representation, handling various message types and\n * preserving metadata, tool calls, and other relevant information.\n *\n * @remarks\n * The converter handles the following message roles:\n * - `assistant`: Converted to {@link AIMessage} with support for tool calls, function calls,\n * audio content, and multi-modal outputs\n * - Other roles: Converted to generic {@link ChatMessage}\n *\n * For assistant messages, the converter:\n * - Parses and validates tool calls, separating valid and invalid calls\n * - Preserves function call information in additional_kwargs\n * - Includes usage statistics and system fingerprint in response_metadata\n * - Handles multi-modal content (text, images, audio)\n * - Optionally includes the raw API response for debugging\n *\n * @param params - Conversion parameters\n * @param params.message - The OpenAI chat completion message to convert\n * @param params.rawResponse - The complete raw response from OpenAI's API, used to extract\n * metadata like model name, usage statistics, and system fingerprint\n * @param params.includeRawResponse - If true, includes the raw OpenAI response in the\n * message's additional_kwargs under the `__raw_response` key. Useful for debugging\n * or accessing provider-specific fields. Defaults to false.\n *\n * @returns A LangChain BaseMessage instance:\n * - {@link AIMessage} for assistant messages with tool calls, metadata, and content\n * - {@link ChatMessage} for all other message types\n *\n * @example\n * ```typescript\n * const baseMessage = convertCompletionsMessageToBaseMessage({\n * message: {\n * role: \"assistant\",\n * content: \"Hello! 
/**
 * Converts an OpenAI Chat Completions API message to a LangChain BaseMessage.
 *
 * This converter transforms messages from OpenAI's Chat Completions API format into
 * LangChain's internal message representation, handling various message types and
 * preserving metadata, tool calls, and other relevant information.
 *
 * @remarks
 * The converter handles the following message roles:
 * - `assistant`: Converted to {@link AIMessage} with support for tool calls, function calls,
 *   audio content, and multi-modal outputs
 * - Other roles: Converted to generic {@link ChatMessage}
 *
 * For assistant messages, the converter:
 * - Parses and validates tool calls, separating valid and invalid calls
 * - Preserves function call information in additional_kwargs
 * - Includes usage statistics and system fingerprint in response_metadata
 * - Handles multi-modal content (text, images, audio)
 * - Optionally includes the raw API response for debugging
 *
 * @param params - Conversion parameters
 * @param params.message - The OpenAI chat completion message to convert
 * @param params.rawResponse - The complete raw response from OpenAI's API, used to extract
 *   metadata like model name, usage statistics, and system fingerprint
 * @param params.includeRawResponse - If true, includes the raw OpenAI response in the
 *   message's additional_kwargs under the `__raw_response` key. Useful for debugging
 *   or accessing provider-specific fields. Defaults to false.
 *
 * @returns A LangChain BaseMessage instance:
 * - {@link AIMessage} for assistant messages with tool calls, metadata, and content
 * - {@link ChatMessage} for all other message types
 *
 * @example
 * ```typescript
 * const baseMessage = convertCompletionsMessageToBaseMessage({
 *   message: {
 *     role: "assistant",
 *     content: "Hello! How can I help you?",
 *     tool_calls: [
 *       {
 *         id: "call_123",
 *         type: "function",
 *         function: { name: "get_weather", arguments: '{"location":"NYC"}' }
 *       }
 *     ]
 *   },
 *   rawResponse: completionResponse,
 *   includeRawResponse: true
 * });
 * // Returns an AIMessage with parsed tool calls and metadata
 * ```
 *
 * @throws {Error} If tool call parsing fails, the invalid tool call is captured in
 *   the `invalid_tool_calls` array rather than throwing an error
 */
export declare const convertCompletionsMessageToBaseMessage: Converter<{
    message: OpenAIClient.Chat.Completions.ChatCompletionMessage;
    rawResponse: OpenAIClient.Chat.Completions.ChatCompletion;
    includeRawResponse?: boolean;
}, BaseMessage>;
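// An illustrative usage sketch for the converter declared above. It assumes the abbreviated
// ChatCompletion below carries enough fields for the metadata extraction described in the
// JSDoc; required fields vary across `openai` versions, hence the cast.
const exampleCompletion = {
    id: "chatcmpl-123",
    object: "chat.completion",
    created: 1736899200,
    model: "gpt-4o-mini",
    choices: [],
    usage: { prompt_tokens: 12, completion_tokens: 9, total_tokens: 21 },
} as OpenAIClient.Chat.Completions.ChatCompletion;

const assistantMessage = convertCompletionsMessageToBaseMessage({
    message: {
        role: "assistant",
        content: "Hello! How can I help you?",
        refusal: null,
        tool_calls: [
            {
                id: "call_123",
                type: "function",
                function: { name: "get_weather", arguments: '{"location":"NYC"}' },
            },
        ],
    },
    rawResponse: exampleCompletion,
    includeRawResponse: true,
});
// Per the JSDoc, assistantMessage is an AIMessage with parsed tool_calls, response_metadata
// taken from exampleCompletion, and the raw response under additional_kwargs.__raw_response.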
/**
 * Converts an OpenAI Chat Completions API delta (streaming chunk) to a LangChain BaseMessageChunk.
 *
 * This converter is used during streaming responses to transform incremental updates from OpenAI's
 * Chat Completions API into LangChain message chunks. It handles various message types, tool calls,
 * function calls, audio content, and role-specific message chunk creation.
 *
 * @param params - Conversion parameters
 * @param params.delta - The delta object from an OpenAI streaming chunk containing incremental
 *   message updates. May include content, role, tool_calls, function_call, audio, etc.
 * @param params.rawResponse - The complete raw ChatCompletionChunk response from OpenAI,
 *   containing metadata like model info, usage stats, and the delta
 * @param params.includeRawResponse - Optional flag to include the raw OpenAI response in the
 *   message chunk's additional_kwargs. Useful for debugging or accessing provider-specific data
 * @param params.defaultRole - Optional default role to use if the delta doesn't specify one.
 *   Typically used to maintain role consistency across chunks in a streaming response
 *
 * @returns A BaseMessageChunk subclass appropriate for the message role:
 * - HumanMessageChunk for "user" role
 * - AIMessageChunk for "assistant" role (includes tool call chunks)
 * - SystemMessageChunk for "system" or "developer" roles
 * - FunctionMessageChunk for "function" role
 * - ToolMessageChunk for "tool" role
 * - ChatMessageChunk for any other role
 *
 * @example
 * Basic streaming text chunk:
 * ```typescript
 * const chunk = convertCompletionsDeltaToBaseMessageChunk({
 *   delta: { role: "assistant", content: "Hello" },
 *   rawResponse: { id: "chatcmpl-123", model: "gpt-4", ... }
 * });
 * // Returns: AIMessageChunk with content "Hello"
 * ```
 *
 * @example
 * Streaming chunk with tool call:
 * ```typescript
 * const chunk = convertCompletionsDeltaToBaseMessageChunk({
 *   delta: {
 *     role: "assistant",
 *     tool_calls: [{
 *       index: 0,
 *       id: "call_123",
 *       function: { name: "get_weather", arguments: '{"location":' }
 *     }]
 *   },
 *   rawResponse: { id: "chatcmpl-123", ... }
 * });
 * // Returns: AIMessageChunk with tool_call_chunks containing partial tool call data
 * ```
 *
 * @remarks
 * - Tool calls are converted to ToolCallChunk objects with incremental data
 * - Audio content includes the chunk index from the raw response
 * - The "developer" role is mapped to SystemMessageChunk with a special marker
 * - Response metadata includes model provider info and usage statistics
 * - Function calls and tool calls are stored in additional_kwargs for compatibility
 */
export declare const convertCompletionsDeltaToBaseMessageChunk: Converter<{
    delta: Record<string, any>;
    rawResponse: OpenAIClient.Chat.Completions.ChatCompletionChunk;
    includeRawResponse?: boolean;
    defaultRole?: OpenAIClient.Chat.ChatCompletionRole;
}, BaseMessageChunk>;
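// An illustrative streaming sketch for the delta converter declared above. The abbreviated
// ChatCompletionChunk is an assumption about the minimum metadata needed; real chunks come
// straight from the streaming API.
const exampleStreamChunk = {
    id: "chatcmpl-123",
    object: "chat.completion.chunk",
    created: 1736899200,
    model: "gpt-4o-mini",
    choices: [],
} as OpenAIClient.Chat.Completions.ChatCompletionChunk;

const partialMessage = convertCompletionsDeltaToBaseMessageChunk({
    delta: { content: "Hello" },   // this delta omits the role on purpose...
    rawResponse: exampleStreamChunk,
    defaultRole: "assistant",      // ...so defaultRole keeps the chunk typed as an AIMessageChunk
});
// Per the JSDoc, partialMessage is an AIMessageChunk with content "Hello"; successive chunks
// can be merged (for example with the chunk's concat() method) to rebuild the full message.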
/**
 * Converts a standard LangChain content block to an OpenAI Completions API content part.
 *
 * This converter transforms LangChain's standardized content blocks (image, audio, file)
 * into the format expected by OpenAI's Chat Completions API. It handles various content
 * types including images (URL or base64), audio (base64), and files (data or file ID).
 *
 * @param block - The standard content block to convert. Can be an image, audio, or file block.
 *
 * @returns An OpenAI Chat Completions content part object, or undefined if the block
 *   cannot be converted (e.g., missing required data).
 *
 * @example
 * Image with URL:
 * ```typescript
 * const block = { type: "image", url: "https://example.com/image.jpg" };
 * const part = convertStandardContentBlockToCompletionsContentPart(block);
 * // Returns: { type: "image_url", image_url: { url: "https://example.com/image.jpg" } }
 * ```
 *
 * @example
 * Image with base64 data:
 * ```typescript
 * const block = { type: "image", data: "iVBORw0KGgo...", mimeType: "image/png" };
 * const part = convertStandardContentBlockToCompletionsContentPart(block);
 * // Returns: { type: "image_url", image_url: { url: "data:image/png;base64,iVBORw0KGgo..." } }
 * ```
 */
export declare const convertStandardContentBlockToCompletionsContentPart: Converter<ContentBlock.Standard, OpenAIClient.Chat.Completions.ChatCompletionContentPartImage | OpenAIClient.Chat.Completions.ChatCompletionContentPartInputAudio | OpenAIClient.Chat.Completions.ChatCompletionContentPart.File | undefined>;
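// A small sketch for the block converter declared above, reusing the base64 example from the
// JSDoc; the truncated "iVBORw0KGgo..." payload is a placeholder, not a real image. The cast
// assumes this object literal matches the ContentBlock.Standard image variant.
const pngPart = convertStandardContentBlockToCompletionsContentPart({
    type: "image",
    data: "iVBORw0KGgo...",
    mimeType: "image/png",
} as ContentBlock.Standard);
// Expected shape per the JSDoc:
//   { type: "image_url", image_url: { url: "data:image/png;base64,iVBORw0KGgo..." } }
// The converter can also return undefined (e.g. a block missing its data), so check pngPart
// before adding it to a request's content array.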
/**
 * Converts a LangChain BaseMessage with standard content blocks to an OpenAI Chat Completions API message parameter.
 *
 * This converter transforms LangChain's standardized message format (using contentBlocks) into the format
 * expected by OpenAI's Chat Completions API. It handles role mapping, content filtering, and multi-modal
 * content conversion for various message types.
 *
 * @remarks
 * The converter performs the following transformations:
 * - Maps LangChain message roles to OpenAI API roles (user, assistant, system, developer, tool, function)
 * - For reasoning models, automatically converts "system" role to "developer" role
 * - Filters content blocks based on message role (most roles only include text blocks)
 * - For user messages, converts multi-modal content blocks (images, audio, files) to OpenAI format
 * - Preserves tool call IDs for tool messages and function names for function messages
 *
 * Role-specific behavior:
 * - **developer**: Returns only text content blocks (used for reasoning models)
 * - **system**: Returns only text content blocks
 * - **assistant**: Returns only text content blocks
 * - **tool**: Returns only text content blocks with tool_call_id preserved
 * - **function**: Returns text content blocks joined as a single string with function name
 * - **user** (default): Returns multi-modal content including text, images, audio, and files
 *
 * @param params - Conversion parameters
 * @param params.message - The LangChain BaseMessage to convert. Must have contentBlocks property
 *   containing an array of standard content blocks (text, image, audio, file, etc.)
 * @param params.model - Optional model name. Used to determine if special role mapping is needed
 *   (e.g., "system" -> "developer" for reasoning models like o1)
 *
 * @returns An OpenAI ChatCompletionMessageParam object formatted for the Chat Completions API.
 *   The structure varies by role:
 * - Developer/System/Assistant: `{ role, content: TextBlock[] }`
 * - Tool: `{ role: "tool", tool_call_id, content: TextBlock[] }`
 * - Function: `{ role: "function", name, content: string }`
 * - User: `{ role: "user", content: Array<TextPart | ImagePart | AudioPart | FilePart> }`
 *
 * @example
 * Simple text message:
 * ```typescript
 * const message = new HumanMessage({
 *   content: [{ type: "text", text: "Hello!" }]
 * });
 * const param = convertStandardContentMessageToCompletionsMessage({ message });
 * // Returns: { role: "user", content: [{ type: "text", text: "Hello!" }] }
 * ```
 *
 * @example
 * Multi-modal user message with image:
 * ```typescript
 * const message = new HumanMessage({
 *   content: [
 *     { type: "text", text: "What's in this image?" },
 *     { type: "image", url: "https://example.com/image.jpg" }
 *   ]
 * });
 * const param = convertStandardContentMessageToCompletionsMessage({ message });
 * // Returns: {
 * //   role: "user",
 * //   content: [
 * //     { type: "text", text: "What's in this image?" },
 * //     { type: "image_url", image_url: { url: "https://example.com/image.jpg" } }
 * //   ]
 * // }
 * ```
 */
export declare const convertStandardContentMessageToCompletionsMessage: Converter<{
    message: BaseMessage;
    model?: string;
}, OpenAIClient.Chat.Completions.ChatCompletionMessageParam>;
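// An illustrative sketch for the message-level converter declared above, mirroring the
// multi-modal JSDoc example. HumanMessage comes from "@langchain/core/messages", the same
// package the declarations above import from.
import { HumanMessage } from "@langchain/core/messages";

const userParam = convertStandardContentMessageToCompletionsMessage({
    message: new HumanMessage({
        content: [
            { type: "text", text: "What's in this image?" },
            { type: "image", url: "https://example.com/image.jpg" },
        ],
    }),
    model: "o1",   // with a reasoning model, "system" messages would be mapped to "developer"
});
// Per the JSDoc, userParam is a user-role ChatCompletionMessageParam whose content mixes a
// text part and an image_url part.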
/**
 * Converts an array of LangChain BaseMessages to OpenAI Chat Completions API message parameters.
 *
 * This converter transforms LangChain's internal message representation into the format required
 * by OpenAI's Chat Completions API. It handles various message types, roles, content formats,
 * tool calls, function calls, audio messages, and special model-specific requirements.
 *
 * @remarks
 * The converter performs several key transformations:
 * - Maps LangChain message types to OpenAI roles (user, assistant, system, tool, function, developer)
 * - Converts standard content blocks (v1 format) using a specialized converter
 * - Handles multimodal content including text, images, audio, and data blocks
 * - Preserves tool calls and function calls with proper formatting
 * - Applies model-specific role mappings (e.g., "system" -> "developer" for reasoning models)
 * - Splits audio messages into separate message parameters when needed
 *
 * @param params - Conversion parameters
 * @param params.messages - Array of LangChain BaseMessages to convert. Can include any message
 *   type: HumanMessage, AIMessage, SystemMessage, ToolMessage, FunctionMessage, etc.
 * @param params.model - Optional model name used to determine if special role mapping is needed.
 *   For reasoning models (o1, o3, etc.), "system" role is converted to "developer" role.
 *
 * @returns Array of ChatCompletionMessageParam objects formatted for OpenAI's Chat Completions API.
 *   Some messages may be split into multiple parameters (e.g., audio messages).
 *
 * @example
 * Basic message conversion:
 * ```typescript
 * const messages = [
 *   new HumanMessage("What's the weather like?"),
 *   new AIMessage("Let me check that for you.")
 * ];
 *
 * const params = convertMessagesToCompletionsMessageParams({
 *   messages,
 *   model: "gpt-4"
 * });
 * // Returns:
 * // [
 * //   { role: "user", content: "What's the weather like?" },
 * //   { role: "assistant", content: "Let me check that for you." }
 * // ]
 * ```
 *
 * @example
 * Message with tool calls:
 * ```typescript
 * const messages = [
 *   new AIMessage({
 *     content: "",
 *     tool_calls: [{
 *       id: "call_123",
 *       name: "get_weather",
 *       args: { location: "San Francisco" }
 *     }]
 *   })
 * ];
 *
 * const params = convertMessagesToCompletionsMessageParams({ messages });
 * // Returns:
 * // [{
 * //   role: "assistant",
 * //   content: "",
 * //   tool_calls: [{
 * //     id: "call_123",
 * //     type: "function",
 * //     function: { name: "get_weather", arguments: '{"location":"San Francisco"}' }
 * //   }]
 * // }]
 * ```
 */
export declare const convertMessagesToCompletionsMessageParams: Converter<{
    messages: BaseMessage[];
    model?: string;
}, OpenAIClient.Chat.Completions.ChatCompletionMessageParam[]>;
//# sourceMappingURL=completions.d.ts.map
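// A final sketch for the batch converter declared above, combining the two JSDoc examples.
// AIMessage comes from "@langchain/core/messages"; HumanMessage is reused from the earlier
// sketch. The expected output shape restated in the trailing comments is the one the JSDoc
// documents, not a verified run.
import { AIMessage } from "@langchain/core/messages";

const completionParams = convertMessagesToCompletionsMessageParams({
    messages: [
        new HumanMessage("What's the weather like?"),
        new AIMessage({
            content: "",
            tool_calls: [
                { id: "call_123", name: "get_weather", args: { location: "San Francisco" } },
            ],
        }),
    ],
    model: "gpt-4",
});
// Expected per the JSDoc:
// [
//   { role: "user", content: "What's the weather like?" },
//   { role: "assistant", content: "", tool_calls: [{ id: "call_123", type: "function",
//     function: { name: "get_weather", arguments: '{"location":"San Francisco"}' } }] }
// ]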