UNPKG

@ai-sdk/openai

Version:

The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.

1,339 lines (1,318 loc) 220 kB
"use strict"; var __defProp = Object.defineProperty; var __getOwnPropDesc = Object.getOwnPropertyDescriptor; var __getOwnPropNames = Object.getOwnPropertyNames; var __hasOwnProp = Object.prototype.hasOwnProperty; var __export = (target, all) => { for (var name in all) __defProp(target, name, { get: all[name], enumerable: true }); }; var __copyProps = (to, from, except, desc) => { if (from && typeof from === "object" || typeof from === "function") { for (let key of __getOwnPropNames(from)) if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); } return to; }; var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); // src/internal/index.ts var internal_exports = {}; __export(internal_exports, { OpenAIChatLanguageModel: () => OpenAIChatLanguageModel, OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel, OpenAIEmbeddingModel: () => OpenAIEmbeddingModel, OpenAIImageModel: () => OpenAIImageModel, OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel, OpenAISpeechModel: () => OpenAISpeechModel, OpenAITranscriptionModel: () => OpenAITranscriptionModel, applyPatch: () => applyPatch, applyPatchArgsSchema: () => applyPatchArgsSchema, applyPatchInputSchema: () => applyPatchInputSchema, applyPatchOutputSchema: () => applyPatchOutputSchema, applyPatchToolFactory: () => applyPatchToolFactory, codeInterpreter: () => codeInterpreter, codeInterpreterArgsSchema: () => codeInterpreterArgsSchema, codeInterpreterInputSchema: () => codeInterpreterInputSchema, codeInterpreterOutputSchema: () => codeInterpreterOutputSchema, codeInterpreterToolFactory: () => codeInterpreterToolFactory, fileSearch: () => fileSearch, fileSearchArgsSchema: () => fileSearchArgsSchema, fileSearchOutputSchema: () => fileSearchOutputSchema, hasDefaultResponseFormat: () => hasDefaultResponseFormat, imageGeneration: () => imageGeneration, imageGenerationArgsSchema: () => imageGenerationArgsSchema, imageGenerationOutputSchema: () => imageGenerationOutputSchema, modelMaxImagesPerCall: () => modelMaxImagesPerCall, openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions, openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions, openaiCompletionProviderOptions: () => openaiCompletionProviderOptions, openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions, openaiSpeechProviderOptionsSchema: () => openaiSpeechProviderOptionsSchema, webSearchPreview: () => webSearchPreview, webSearchPreviewArgsSchema: () => webSearchPreviewArgsSchema, webSearchPreviewInputSchema: () => webSearchPreviewInputSchema }); module.exports = __toCommonJS(internal_exports); // src/chat/openai-chat-language-model.ts var import_provider3 = require("@ai-sdk/provider"); var import_provider_utils5 = require("@ai-sdk/provider-utils"); // src/openai-error.ts var import_v4 = require("zod/v4"); var import_provider_utils = require("@ai-sdk/provider-utils"); var openaiErrorDataSchema = import_v4.z.object({ error: import_v4.z.object({ message: import_v4.z.string(), // The additional information below is handled loosely to support // OpenAI-compatible providers that have slightly different error // responses: type: import_v4.z.string().nullish(), param: import_v4.z.any().nullish(), code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish() }) }); var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({ errorSchema: openaiErrorDataSchema, errorToMessage: (data) => data.error.message }); // src/openai-language-model-capabilities.ts function getOpenAILanguageModelCapabilities(modelId) { const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"); const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini"); const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"); const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2"); const systemMessageMode = isReasoningModel ? "developer" : "system"; return { supportsFlexProcessing, supportsPriorityProcessing, isReasoningModel, systemMessageMode, supportsNonReasoningParameters }; } // src/chat/convert-openai-chat-usage.ts function convertOpenAIChatUsage(usage) { var _a, _b, _c, _d, _e, _f; if (usage == null) { return { inputTokens: { total: void 0, noCache: void 0, cacheRead: void 0, cacheWrite: void 0 }, outputTokens: { total: void 0, text: void 0, reasoning: void 0 }, raw: void 0 }; } const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0; const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0; const cachedTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0; const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0; return { inputTokens: { total: promptTokens, noCache: promptTokens - cachedTokens, cacheRead: cachedTokens, cacheWrite: void 0 }, outputTokens: { total: completionTokens, text: completionTokens - reasoningTokens, reasoning: reasoningTokens }, raw: usage }; } // src/chat/convert-to-openai-chat-messages.ts var import_provider = require("@ai-sdk/provider"); var import_provider_utils2 = require("@ai-sdk/provider-utils"); function convertToOpenAIChatMessages({ prompt, systemMessageMode = "system" }) { var _a; const messages = []; const warnings = []; for (const { role, content } of prompt) { switch (role) { case "system": { switch (systemMessageMode) { case "system": { messages.push({ role: "system", content }); break; } case "developer": { messages.push({ role: "developer", content }); break; } case "remove": { warnings.push({ type: "other", message: "system messages are removed for this model" }); break; } default: { const _exhaustiveCheck = systemMessageMode; throw new Error( `Unsupported system message mode: ${_exhaustiveCheck}` ); } } break; } case "user": { if (content.length === 1 && content[0].type === "text") { messages.push({ role: "user", content: content[0].text }); break; } messages.push({ role: "user", content: content.map((part, index) => { var _a2, _b, _c; switch (part.type) { case "text": { return { type: "text", text: part.text }; } case "file": { if (part.mediaType.startsWith("image/")) { const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType; return { type: "image_url", image_url: { url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`, // OpenAI specific extension: image detail detail: (_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b.imageDetail } }; } else if (part.mediaType.startsWith("audio/")) { if (part.data instanceof URL) { throw new import_provider.UnsupportedFunctionalityError({ functionality: "audio file parts with URLs" }); } switch (part.mediaType) { case "audio/wav": { return { type: "input_audio", input_audio: { data: (0, import_provider_utils2.convertToBase64)(part.data), format: "wav" } }; } case "audio/mp3": case "audio/mpeg": { return { type: "input_audio", input_audio: { data: (0, import_provider_utils2.convertToBase64)(part.data), format: "mp3" } }; } default: { throw new import_provider.UnsupportedFunctionalityError({ functionality: `audio content parts with media type ${part.mediaType}` }); } } } else if (part.mediaType === "application/pdf") { if (part.data instanceof URL) { throw new import_provider.UnsupportedFunctionalityError({ functionality: "PDF file parts with URLs" }); } return { type: "file", file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : { filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`, file_data: `data:application/pdf;base64,${(0, import_provider_utils2.convertToBase64)(part.data)}` } }; } else { throw new import_provider.UnsupportedFunctionalityError({ functionality: `file part media type ${part.mediaType}` }); } } } }) }); break; } case "assistant": { let text = ""; const toolCalls = []; for (const part of content) { switch (part.type) { case "text": { text += part.text; break; } case "tool-call": { toolCalls.push({ id: part.toolCallId, type: "function", function: { name: part.toolName, arguments: JSON.stringify(part.input) } }); break; } } } messages.push({ role: "assistant", content: text, tool_calls: toolCalls.length > 0 ? toolCalls : void 0 }); break; } case "tool": { for (const toolResponse of content) { if (toolResponse.type === "tool-approval-response") { continue; } const output = toolResponse.output; let contentValue; switch (output.type) { case "text": case "error-text": contentValue = output.value; break; case "execution-denied": contentValue = (_a = output.reason) != null ? _a : "Tool execution denied."; break; case "content": case "json": case "error-json": contentValue = JSON.stringify(output.value); break; } messages.push({ role: "tool", tool_call_id: toolResponse.toolCallId, content: contentValue }); } break; } default: { const _exhaustiveCheck = role; throw new Error(`Unsupported role: ${_exhaustiveCheck}`); } } } return { messages, warnings }; } // src/chat/get-response-metadata.ts function getResponseMetadata({ id, model, created }) { return { id: id != null ? id : void 0, modelId: model != null ? model : void 0, timestamp: created ? new Date(created * 1e3) : void 0 }; } // src/chat/map-openai-finish-reason.ts function mapOpenAIFinishReason(finishReason) { switch (finishReason) { case "stop": return "stop"; case "length": return "length"; case "content_filter": return "content-filter"; case "function_call": case "tool_calls": return "tool-calls"; default: return "other"; } } // src/chat/openai-chat-api.ts var import_provider_utils3 = require("@ai-sdk/provider-utils"); var import_v42 = require("zod/v4"); var openaiChatResponseSchema = (0, import_provider_utils3.lazySchema)( () => (0, import_provider_utils3.zodSchema)( import_v42.z.object({ id: import_v42.z.string().nullish(), created: import_v42.z.number().nullish(), model: import_v42.z.string().nullish(), choices: import_v42.z.array( import_v42.z.object({ message: import_v42.z.object({ role: import_v42.z.literal("assistant").nullish(), content: import_v42.z.string().nullish(), tool_calls: import_v42.z.array( import_v42.z.object({ id: import_v42.z.string().nullish(), type: import_v42.z.literal("function"), function: import_v42.z.object({ name: import_v42.z.string(), arguments: import_v42.z.string() }) }) ).nullish(), annotations: import_v42.z.array( import_v42.z.object({ type: import_v42.z.literal("url_citation"), url_citation: import_v42.z.object({ start_index: import_v42.z.number(), end_index: import_v42.z.number(), url: import_v42.z.string(), title: import_v42.z.string() }) }) ).nullish() }), index: import_v42.z.number(), logprobs: import_v42.z.object({ content: import_v42.z.array( import_v42.z.object({ token: import_v42.z.string(), logprob: import_v42.z.number(), top_logprobs: import_v42.z.array( import_v42.z.object({ token: import_v42.z.string(), logprob: import_v42.z.number() }) ) }) ).nullish() }).nullish(), finish_reason: import_v42.z.string().nullish() }) ), usage: import_v42.z.object({ prompt_tokens: import_v42.z.number().nullish(), completion_tokens: import_v42.z.number().nullish(), total_tokens: import_v42.z.number().nullish(), prompt_tokens_details: import_v42.z.object({ cached_tokens: import_v42.z.number().nullish() }).nullish(), completion_tokens_details: import_v42.z.object({ reasoning_tokens: import_v42.z.number().nullish(), accepted_prediction_tokens: import_v42.z.number().nullish(), rejected_prediction_tokens: import_v42.z.number().nullish() }).nullish() }).nullish() }) ) ); var openaiChatChunkSchema = (0, import_provider_utils3.lazySchema)( () => (0, import_provider_utils3.zodSchema)( import_v42.z.union([ import_v42.z.object({ id: import_v42.z.string().nullish(), created: import_v42.z.number().nullish(), model: import_v42.z.string().nullish(), choices: import_v42.z.array( import_v42.z.object({ delta: import_v42.z.object({ role: import_v42.z.enum(["assistant"]).nullish(), content: import_v42.z.string().nullish(), tool_calls: import_v42.z.array( import_v42.z.object({ index: import_v42.z.number(), id: import_v42.z.string().nullish(), type: import_v42.z.literal("function").nullish(), function: import_v42.z.object({ name: import_v42.z.string().nullish(), arguments: import_v42.z.string().nullish() }) }) ).nullish(), annotations: import_v42.z.array( import_v42.z.object({ type: import_v42.z.literal("url_citation"), url_citation: import_v42.z.object({ start_index: import_v42.z.number(), end_index: import_v42.z.number(), url: import_v42.z.string(), title: import_v42.z.string() }) }) ).nullish() }).nullish(), logprobs: import_v42.z.object({ content: import_v42.z.array( import_v42.z.object({ token: import_v42.z.string(), logprob: import_v42.z.number(), top_logprobs: import_v42.z.array( import_v42.z.object({ token: import_v42.z.string(), logprob: import_v42.z.number() }) ) }) ).nullish() }).nullish(), finish_reason: import_v42.z.string().nullish(), index: import_v42.z.number() }) ), usage: import_v42.z.object({ prompt_tokens: import_v42.z.number().nullish(), completion_tokens: import_v42.z.number().nullish(), total_tokens: import_v42.z.number().nullish(), prompt_tokens_details: import_v42.z.object({ cached_tokens: import_v42.z.number().nullish() }).nullish(), completion_tokens_details: import_v42.z.object({ reasoning_tokens: import_v42.z.number().nullish(), accepted_prediction_tokens: import_v42.z.number().nullish(), rejected_prediction_tokens: import_v42.z.number().nullish() }).nullish() }).nullish() }), openaiErrorDataSchema ]) ) ); // src/chat/openai-chat-options.ts var import_provider_utils4 = require("@ai-sdk/provider-utils"); var import_v43 = require("zod/v4"); var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)( () => (0, import_provider_utils4.zodSchema)( import_v43.z.object({ /** * Modify the likelihood of specified tokens appearing in the completion. * * Accepts a JSON object that maps tokens (specified by their token ID in * the GPT tokenizer) to an associated bias value from -100 to 100. */ logitBias: import_v43.z.record(import_v43.z.coerce.number(), import_v43.z.number()).optional(), /** * Return the log probabilities of the tokens. * * Setting to true will return the log probabilities of the tokens that * were generated. * * Setting to a number will return the log probabilities of the top n * tokens that were generated. */ logprobs: import_v43.z.union([import_v43.z.boolean(), import_v43.z.number()]).optional(), /** * Whether to enable parallel function calling during tool use. Default to true. */ parallelToolCalls: import_v43.z.boolean().optional(), /** * A unique identifier representing your end-user, which can help OpenAI to * monitor and detect abuse. */ user: import_v43.z.string().optional(), /** * Reasoning effort for reasoning models. Defaults to `medium`. */ reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(), /** * Maximum number of completion tokens to generate. Useful for reasoning models. */ maxCompletionTokens: import_v43.z.number().optional(), /** * Whether to enable persistence in responses API. */ store: import_v43.z.boolean().optional(), /** * Metadata to associate with the request. */ metadata: import_v43.z.record(import_v43.z.string().max(64), import_v43.z.string().max(512)).optional(), /** * Parameters for prediction mode. */ prediction: import_v43.z.record(import_v43.z.string(), import_v43.z.any()).optional(), /** * Service tier for the request. * - 'auto': Default service tier. The request will be processed with the service tier configured in the * Project settings. Unless otherwise configured, the Project will use 'default'. * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models. * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers. * - 'default': The request will be processed with the standard pricing and performance for the selected model. * * @default 'auto' */ serviceTier: import_v43.z.enum(["auto", "flex", "priority", "default"]).optional(), /** * Whether to use strict JSON schema validation. * * @default true */ strictJsonSchema: import_v43.z.boolean().optional(), /** * Controls the verbosity of the model's responses. * Lower values will result in more concise responses, while higher values will result in more verbose responses. */ textVerbosity: import_v43.z.enum(["low", "medium", "high"]).optional(), /** * A cache key for prompt caching. Allows manual control over prompt caching behavior. * Useful for improving cache hit rates and working around automatic caching issues. */ promptCacheKey: import_v43.z.string().optional(), /** * The retention policy for the prompt cache. * - 'in_memory': Default. Standard prompt caching behavior. * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours. * Currently only available for 5.1 series models. * * @default 'in_memory' */ promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(), /** * A stable identifier used to help detect users of your application * that may be violating OpenAI's usage policies. The IDs should be a * string that uniquely identifies each user. We recommend hashing their * username or email address, in order to avoid sending us any identifying * information. */ safetyIdentifier: import_v43.z.string().optional(), /** * Override the system message mode for this model. * - 'system': Use the 'system' role for system messages (default for most models) * - 'developer': Use the 'developer' role for system messages (used by reasoning models) * - 'remove': Remove system messages entirely * * If not specified, the mode is automatically determined based on the model. */ systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(), /** * Force treating this model as a reasoning model. * * This is useful for "stealth" reasoning models (e.g. via a custom baseURL) * where the model ID is not recognized by the SDK's allowlist. * * When enabled, the SDK applies reasoning-model parameter compatibility rules * and defaults `systemMessageMode` to `developer` unless overridden. */ forceReasoning: import_v43.z.boolean().optional() }) ) ); // src/chat/openai-chat-prepare-tools.ts var import_provider2 = require("@ai-sdk/provider"); function prepareChatTools({ tools, toolChoice }) { tools = (tools == null ? void 0 : tools.length) ? tools : void 0; const toolWarnings = []; if (tools == null) { return { tools: void 0, toolChoice: void 0, toolWarnings }; } const openaiTools = []; for (const tool of tools) { switch (tool.type) { case "function": openaiTools.push({ type: "function", function: { name: tool.name, description: tool.description, parameters: tool.inputSchema, ...tool.strict != null ? { strict: tool.strict } : {} } }); break; default: toolWarnings.push({ type: "unsupported", feature: `tool type: ${tool.type}` }); break; } } if (toolChoice == null) { return { tools: openaiTools, toolChoice: void 0, toolWarnings }; } const type = toolChoice.type; switch (type) { case "auto": case "none": case "required": return { tools: openaiTools, toolChoice: type, toolWarnings }; case "tool": return { tools: openaiTools, toolChoice: { type: "function", function: { name: toolChoice.toolName } }, toolWarnings }; default: { const _exhaustiveCheck = type; throw new import_provider2.UnsupportedFunctionalityError({ functionality: `tool choice type: ${_exhaustiveCheck}` }); } } } // src/chat/openai-chat-language-model.ts var OpenAIChatLanguageModel = class { constructor(modelId, config) { this.specificationVersion = "v3"; this.supportedUrls = { "image/*": [/^https?:\/\/.*$/] }; this.modelId = modelId; this.config = config; } get provider() { return this.config.provider; } async getArgs({ prompt, maxOutputTokens, temperature, topP, topK, frequencyPenalty, presencePenalty, stopSequences, responseFormat, seed, tools, toolChoice, providerOptions }) { var _a, _b, _c, _d, _e; const warnings = []; const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({ provider: "openai", providerOptions, schema: openaiChatLanguageModelOptions })) != null ? _a : {}; const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId); const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel; if (topK != null) { warnings.push({ type: "unsupported", feature: "topK" }); } const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages( { prompt, systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode } ); warnings.push(...messageWarnings); const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true; const baseArgs = { // model id: model: this.modelId, // model specific settings: logit_bias: openaiOptions.logitBias, logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0, top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0, user: openaiOptions.user, parallel_tool_calls: openaiOptions.parallelToolCalls, // standardized settings: max_tokens: maxOutputTokens, temperature, top_p: topP, frequency_penalty: frequencyPenalty, presence_penalty: presencePenalty, response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? responseFormat.schema != null ? { type: "json_schema", json_schema: { schema: responseFormat.schema, strict: strictJsonSchema, name: (_e = responseFormat.name) != null ? _e : "response", description: responseFormat.description } } : { type: "json_object" } : void 0, stop: stopSequences, seed, verbosity: openaiOptions.textVerbosity, // openai specific settings: // TODO AI SDK 6: remove, we auto-map maxOutputTokens now max_completion_tokens: openaiOptions.maxCompletionTokens, store: openaiOptions.store, metadata: openaiOptions.metadata, prediction: openaiOptions.prediction, reasoning_effort: openaiOptions.reasoningEffort, service_tier: openaiOptions.serviceTier, prompt_cache_key: openaiOptions.promptCacheKey, prompt_cache_retention: openaiOptions.promptCacheRetention, safety_identifier: openaiOptions.safetyIdentifier, // messages: messages }; if (isReasoningModel) { if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) { if (baseArgs.temperature != null) { baseArgs.temperature = void 0; warnings.push({ type: "unsupported", feature: "temperature", details: "temperature is not supported for reasoning models" }); } if (baseArgs.top_p != null) { baseArgs.top_p = void 0; warnings.push({ type: "unsupported", feature: "topP", details: "topP is not supported for reasoning models" }); } if (baseArgs.logprobs != null) { baseArgs.logprobs = void 0; warnings.push({ type: "other", message: "logprobs is not supported for reasoning models" }); } } if (baseArgs.frequency_penalty != null) { baseArgs.frequency_penalty = void 0; warnings.push({ type: "unsupported", feature: "frequencyPenalty", details: "frequencyPenalty is not supported for reasoning models" }); } if (baseArgs.presence_penalty != null) { baseArgs.presence_penalty = void 0; warnings.push({ type: "unsupported", feature: "presencePenalty", details: "presencePenalty is not supported for reasoning models" }); } if (baseArgs.logit_bias != null) { baseArgs.logit_bias = void 0; warnings.push({ type: "other", message: "logitBias is not supported for reasoning models" }); } if (baseArgs.top_logprobs != null) { baseArgs.top_logprobs = void 0; warnings.push({ type: "other", message: "topLogprobs is not supported for reasoning models" }); } if (baseArgs.max_tokens != null) { if (baseArgs.max_completion_tokens == null) { baseArgs.max_completion_tokens = baseArgs.max_tokens; } baseArgs.max_tokens = void 0; } } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) { if (baseArgs.temperature != null) { baseArgs.temperature = void 0; warnings.push({ type: "unsupported", feature: "temperature", details: "temperature is not supported for the search preview models and has been removed." }); } } if (openaiOptions.serviceTier === "flex" && !modelCapabilities.supportsFlexProcessing) { warnings.push({ type: "unsupported", feature: "serviceTier", details: "flex processing is only available for o3, o4-mini, and gpt-5 models" }); baseArgs.service_tier = void 0; } if (openaiOptions.serviceTier === "priority" && !modelCapabilities.supportsPriorityProcessing) { warnings.push({ type: "unsupported", feature: "serviceTier", details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported" }); baseArgs.service_tier = void 0; } const { tools: openaiTools, toolChoice: openaiToolChoice, toolWarnings } = prepareChatTools({ tools, toolChoice }); return { args: { ...baseArgs, tools: openaiTools, tool_choice: openaiToolChoice }, warnings: [...warnings, ...toolWarnings] }; } async doGenerate(options) { var _a, _b, _c, _d, _e, _f, _g; const { args: body, warnings } = await this.getArgs(options); const { responseHeaders, value: response, rawValue: rawResponse } = await (0, import_provider_utils5.postJsonToApi)({ url: this.config.url({ path: "/chat/completions", modelId: this.modelId }), headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers), body, failedResponseHandler: openaiFailedResponseHandler, successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)( openaiChatResponseSchema ), abortSignal: options.abortSignal, fetch: this.config.fetch }); const choice = response.choices[0]; const content = []; const text = choice.message.content; if (text != null && text.length > 0) { content.push({ type: "text", text }); } for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) { content.push({ type: "tool-call", toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(), toolName: toolCall.function.name, input: toolCall.function.arguments }); } for (const annotation of (_c = choice.message.annotations) != null ? _c : []) { content.push({ type: "source", sourceType: "url", id: (0, import_provider_utils5.generateId)(), url: annotation.url_citation.url, title: annotation.url_citation.title }); } const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details; const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details; const providerMetadata = { openai: {} }; if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) { providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens; } if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) { providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens; } if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) { providerMetadata.openai.logprobs = choice.logprobs.content; } return { content, finishReason: { unified: mapOpenAIFinishReason(choice.finish_reason), raw: (_g = choice.finish_reason) != null ? _g : void 0 }, usage: convertOpenAIChatUsage(response.usage), request: { body }, response: { ...getResponseMetadata(response), headers: responseHeaders, body: rawResponse }, warnings, providerMetadata }; } async doStream(options) { const { args, warnings } = await this.getArgs(options); const body = { ...args, stream: true, stream_options: { include_usage: true } }; const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({ url: this.config.url({ path: "/chat/completions", modelId: this.modelId }), headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers), body, failedResponseHandler: openaiFailedResponseHandler, successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)( openaiChatChunkSchema ), abortSignal: options.abortSignal, fetch: this.config.fetch }); const toolCalls = []; let finishReason = { unified: "other", raw: void 0 }; let usage = void 0; let metadataExtracted = false; let isActiveText = false; const providerMetadata = { openai: {} }; return { stream: response.pipeThrough( new TransformStream({ start(controller) { controller.enqueue({ type: "stream-start", warnings }); }, transform(chunk, controller) { var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q; if (options.includeRawChunks) { controller.enqueue({ type: "raw", rawValue: chunk.rawValue }); } if (!chunk.success) { finishReason = { unified: "error", raw: void 0 }; controller.enqueue({ type: "error", error: chunk.error }); return; } const value = chunk.value; if ("error" in value) { finishReason = { unified: "error", raw: void 0 }; controller.enqueue({ type: "error", error: value.error }); return; } if (!metadataExtracted) { const metadata = getResponseMetadata(value); if (Object.values(metadata).some(Boolean)) { metadataExtracted = true; controller.enqueue({ type: "response-metadata", ...getResponseMetadata(value) }); } } if (value.usage != null) { usage = value.usage; if (((_a = value.usage.completion_tokens_details) == null ? void 0 : _a.accepted_prediction_tokens) != null) { providerMetadata.openai.acceptedPredictionTokens = (_b = value.usage.completion_tokens_details) == null ? void 0 : _b.accepted_prediction_tokens; } if (((_c = value.usage.completion_tokens_details) == null ? void 0 : _c.rejected_prediction_tokens) != null) { providerMetadata.openai.rejectedPredictionTokens = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens; } } const choice = value.choices[0]; if ((choice == null ? void 0 : choice.finish_reason) != null) { finishReason = { unified: mapOpenAIFinishReason(choice.finish_reason), raw: choice.finish_reason }; } if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) { providerMetadata.openai.logprobs = choice.logprobs.content; } if ((choice == null ? void 0 : choice.delta) == null) { return; } const delta = choice.delta; if (delta.content != null) { if (!isActiveText) { controller.enqueue({ type: "text-start", id: "0" }); isActiveText = true; } controller.enqueue({ type: "text-delta", id: "0", delta: delta.content }); } if (delta.tool_calls != null) { for (const toolCallDelta of delta.tool_calls) { const index = toolCallDelta.index; if (toolCalls[index] == null) { if (toolCallDelta.type !== "function") { throw new import_provider3.InvalidResponseDataError({ data: toolCallDelta, message: `Expected 'function' type.` }); } if (toolCallDelta.id == null) { throw new import_provider3.InvalidResponseDataError({ data: toolCallDelta, message: `Expected 'id' to be a string.` }); } if (((_f = toolCallDelta.function) == null ? void 0 : _f.name) == null) { throw new import_provider3.InvalidResponseDataError({ data: toolCallDelta, message: `Expected 'function.name' to be a string.` }); } controller.enqueue({ type: "tool-input-start", id: toolCallDelta.id, toolName: toolCallDelta.function.name }); toolCalls[index] = { id: toolCallDelta.id, type: "function", function: { name: toolCallDelta.function.name, arguments: (_g = toolCallDelta.function.arguments) != null ? _g : "" }, hasFinished: false }; const toolCall2 = toolCalls[index]; if (((_h = toolCall2.function) == null ? void 0 : _h.name) != null && ((_i = toolCall2.function) == null ? void 0 : _i.arguments) != null) { if (toolCall2.function.arguments.length > 0) { controller.enqueue({ type: "tool-input-delta", id: toolCall2.id, delta: toolCall2.function.arguments }); } if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) { controller.enqueue({ type: "tool-input-end", id: toolCall2.id }); controller.enqueue({ type: "tool-call", toolCallId: (_j = toolCall2.id) != null ? _j : (0, import_provider_utils5.generateId)(), toolName: toolCall2.function.name, input: toolCall2.function.arguments }); toolCall2.hasFinished = true; } } continue; } const toolCall = toolCalls[index]; if (toolCall.hasFinished) { continue; } if (((_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null) { toolCall.function.arguments += (_m = (_l = toolCallDelta.function) == null ? void 0 : _l.arguments) != null ? _m : ""; } controller.enqueue({ type: "tool-input-delta", id: toolCall.id, delta: (_n = toolCallDelta.function.arguments) != null ? _n : "" }); if (((_o = toolCall.function) == null ? void 0 : _o.name) != null && ((_p = toolCall.function) == null ? void 0 : _p.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) { controller.enqueue({ type: "tool-input-end", id: toolCall.id }); controller.enqueue({ type: "tool-call", toolCallId: (_q = toolCall.id) != null ? _q : (0, import_provider_utils5.generateId)(), toolName: toolCall.function.name, input: toolCall.function.arguments }); toolCall.hasFinished = true; } } } if (delta.annotations != null) { for (const annotation of delta.annotations) { controller.enqueue({ type: "source", sourceType: "url", id: (0, import_provider_utils5.generateId)(), url: annotation.url_citation.url, title: annotation.url_citation.title }); } } }, flush(controller) { if (isActiveText) { controller.enqueue({ type: "text-end", id: "0" }); } controller.enqueue({ type: "finish", finishReason, usage: convertOpenAIChatUsage(usage), ...providerMetadata != null ? { providerMetadata } : {} }); } }) ), request: { body }, response: { headers: responseHeaders } }; } }; // src/completion/openai-completion-language-model.ts var import_provider_utils8 = require("@ai-sdk/provider-utils"); // src/completion/convert-openai-completion-usage.ts function convertOpenAICompletionUsage(usage) { var _a, _b, _c, _d; if (usage == null) { return { inputTokens: { total: void 0, noCache: void 0, cacheRead: void 0, cacheWrite: void 0 }, outputTokens: { total: void 0, text: void 0, reasoning: void 0 }, raw: void 0 }; } const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0; const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0; return { inputTokens: { total: (_c = usage.prompt_tokens) != null ? _c : void 0, noCache: promptTokens, cacheRead: void 0, cacheWrite: void 0 }, outputTokens: { total: (_d = usage.completion_tokens) != null ? _d : void 0, text: completionTokens, reasoning: void 0 }, raw: usage }; } // src/completion/convert-to-openai-completion-prompt.ts var import_provider4 = require("@ai-sdk/provider"); function convertToOpenAICompletionPrompt({ prompt, user = "user", assistant = "assistant" }) { let text = ""; if (prompt[0].role === "system") { text += `${prompt[0].content} `; prompt = prompt.slice(1); } for (const { role, content } of prompt) { switch (role) { case "system": { throw new import_provider4.InvalidPromptError({ message: "Unexpected system message in prompt: ${content}", prompt }); } case "user": { const userMessage = content.map((part) => { switch (part.type) { case "text": { return part.text; } } }).filter(Boolean).join(""); text += `${user}: ${userMessage} `; break; } case "assistant": { const assistantMessage = content.map((part) => { switch (part.type) { case "text": { return part.text; } case "tool-call": { throw new import_provider4.UnsupportedFunctionalityError({ functionality: "tool-call messages" }); } } }).join(""); text += `${assistant}: ${assistantMessage} `; break; } case "tool": { throw new import_provider4.UnsupportedFunctionalityError({ functionality: "tool messages" }); } default: { const _exhaustiveCheck = role; throw new Error(`Unsupported role: ${_exhaustiveCheck}`); } } } text += `${assistant}: `; return { prompt: text, stopSequences: [` ${user}:`] }; } // src/completion/get-response-metadata.ts function getResponseMetadata2({ id, model, created }) { return { id: id != null ? id : void 0, modelId: model != null ? model : void 0, timestamp: created != null ? new Date(created * 1e3) : void 0 }; } // src/completion/map-openai-finish-reason.ts function mapOpenAIFinishReason2(finishReason) { switch (finishReason) { case "stop": return "stop"; case "length": return "length"; case "content_filter": return "content-filter"; case "function_call": case "tool_calls": return "tool-calls"; default: return "other"; } } // src/completion/openai-completion-api.ts var import_v44 = require("zod/v4"); var import_provider_utils6 = require("@ai-sdk/provider-utils"); var openaiCompletionResponseSchema = (0, import_provider_utils6.lazySchema)( () => (0, import_provider_utils6.zodSchema)( import_v44.z.object({ id: import_v44.z.string().nullish(), created: import_v44.z.number().nullish(), model: import_v44.z.string().nullish(), choices: import_v44.z.array( import_v44.z.object({ text: import_v44.z.string(), finish_reason: import_v44.z.string(), logprobs: import_v44.z.object({ tokens: import_v44.z.array(import_v44.z.string()), token_logprobs: import_v44.z.array(import_v44.z.number()), top_logprobs: import_v44.z.array(import_v44.z.record(import_v44.z.string(), import_v44.z.number())).nullish() }).nullish() }) ), usage: import_v44.z.object({ prompt_tokens: import_v44.z.number(), completion_tokens: import_v44.z.number(), total_tokens: import_v44.z.number() }).nullish() }) ) ); var openaiCompletionChunkSchema = (0, import_provider_utils6.lazySchema)( () => (0, import_provider_utils6.zodSchema)( import_v44.z.union([ import_v44.z.object({ id: import_v44.z.string().nullish(), created: import_v44.z.number().nullish(), model: import_v44.z.string().nullish(), choices: import_v44