UNPKG

@ai-sdk/openai

Version:

The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.

1 lines 196 kB
{"version":3,"sources":["../../src/openai-chat-language-model.ts","../../src/convert-to-openai-chat-messages.ts","../../src/get-response-metadata.ts","../../src/map-openai-finish-reason.ts","../../src/openai-chat-options.ts","../../src/openai-error.ts","../../src/openai-prepare-tools.ts","../../src/tool/file-search.ts","../../src/tool/web-search-preview.ts","../../src/openai-completion-language-model.ts","../../src/convert-to-openai-completion-prompt.ts","../../src/openai-completion-options.ts","../../src/openai-embedding-model.ts","../../src/openai-embedding-options.ts","../../src/openai-image-model.ts","../../src/openai-image-settings.ts","../../src/openai-transcription-model.ts","../../src/openai-transcription-options.ts","../../src/openai-speech-model.ts","../../src/responses/openai-responses-language-model.ts","../../src/responses/convert-to-openai-responses-messages.ts","../../src/responses/map-openai-responses-finish-reason.ts","../../src/responses/openai-responses-prepare-tools.ts"],"sourcesContent":["import {\n InvalidResponseDataError,\n LanguageModelV2,\n LanguageModelV2CallOptions,\n LanguageModelV2CallWarning,\n LanguageModelV2Content,\n LanguageModelV2FinishReason,\n LanguageModelV2StreamPart,\n LanguageModelV2Usage,\n SharedV2ProviderMetadata,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n ParseResult,\n combineHeaders,\n createEventSourceResponseHandler,\n createJsonResponseHandler,\n generateId,\n isParsableJson,\n parseProviderOptions,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { convertToOpenAIChatMessages } from './convert-to-openai-chat-messages';\nimport { getResponseMetadata } from './get-response-metadata';\nimport { mapOpenAIFinishReason } from './map-openai-finish-reason';\nimport {\n OpenAIChatModelId,\n openaiProviderOptions,\n} from './openai-chat-options';\nimport {\n openaiErrorDataSchema,\n openaiFailedResponseHandler,\n} from './openai-error';\nimport { prepareTools } from './openai-prepare-tools';\n\ntype OpenAIChatConfig = {\n provider: string;\n headers: () => Record<string, string | undefined>;\n url: (options: { modelId: string; path: string }) => string;\n fetch?: FetchFunction;\n};\n\nexport class OpenAIChatLanguageModel implements LanguageModelV2 {\n readonly specificationVersion = 'v2';\n\n readonly modelId: OpenAIChatModelId;\n\n readonly supportedUrls = {\n 'image/*': [/^https?:\\/\\/.*$/],\n };\n\n private readonly config: OpenAIChatConfig;\n\n constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig) {\n this.modelId = modelId;\n this.config = config;\n }\n\n get provider(): string {\n return this.config.provider;\n }\n\n private async getArgs({\n prompt,\n maxOutputTokens,\n temperature,\n topP,\n topK,\n frequencyPenalty,\n presencePenalty,\n stopSequences,\n responseFormat,\n seed,\n tools,\n toolChoice,\n providerOptions,\n }: LanguageModelV2CallOptions) {\n const warnings: LanguageModelV2CallWarning[] = [];\n\n // Parse provider options\n const openaiOptions =\n (await parseProviderOptions({\n provider: 'openai',\n providerOptions,\n schema: openaiProviderOptions,\n })) ?? {};\n\n const structuredOutputs = openaiOptions.structuredOutputs ?? true;\n\n if (topK != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'topK',\n });\n }\n\n if (\n responseFormat?.type === 'json' &&\n responseFormat.schema != null &&\n !structuredOutputs\n ) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'responseFormat',\n details:\n 'JSON response format schema is only supported with structuredOutputs',\n });\n }\n\n const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(\n {\n prompt,\n systemMessageMode: getSystemMessageMode(this.modelId),\n },\n );\n\n warnings.push(...messageWarnings);\n\n const strictJsonSchema = openaiOptions.strictJsonSchema ?? false;\n\n const baseArgs = {\n // model id:\n model: this.modelId,\n\n // model specific settings:\n logit_bias: openaiOptions.logitBias,\n logprobs:\n openaiOptions.logprobs === true ||\n typeof openaiOptions.logprobs === 'number'\n ? true\n : undefined,\n top_logprobs:\n typeof openaiOptions.logprobs === 'number'\n ? openaiOptions.logprobs\n : typeof openaiOptions.logprobs === 'boolean'\n ? openaiOptions.logprobs\n ? 0\n : undefined\n : undefined,\n user: openaiOptions.user,\n parallel_tool_calls: openaiOptions.parallelToolCalls,\n\n // standardized settings:\n max_tokens: maxOutputTokens,\n temperature,\n top_p: topP,\n frequency_penalty: frequencyPenalty,\n presence_penalty: presencePenalty,\n response_format:\n responseFormat?.type === 'json'\n ? structuredOutputs && responseFormat.schema != null\n ? {\n type: 'json_schema',\n json_schema: {\n schema: responseFormat.schema,\n strict: strictJsonSchema,\n name: responseFormat.name ?? 'response',\n description: responseFormat.description,\n },\n }\n : { type: 'json_object' }\n : undefined,\n stop: stopSequences,\n seed,\n\n // openai specific settings:\n // TODO remove in next major version; we auto-map maxOutputTokens now\n max_completion_tokens: openaiOptions.maxCompletionTokens,\n store: openaiOptions.store,\n metadata: openaiOptions.metadata,\n prediction: openaiOptions.prediction,\n reasoning_effort: openaiOptions.reasoningEffort,\n service_tier: openaiOptions.serviceTier,\n\n // messages:\n messages,\n };\n\n if (isReasoningModel(this.modelId)) {\n // remove unsupported settings for reasoning models\n // see https://platform.openai.com/docs/guides/reasoning#limitations\n if (baseArgs.temperature != null) {\n baseArgs.temperature = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'temperature',\n details: 'temperature is not supported for reasoning models',\n });\n }\n if (baseArgs.top_p != null) {\n baseArgs.top_p = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'topP',\n details: 'topP is not supported for reasoning models',\n });\n }\n if (baseArgs.frequency_penalty != null) {\n baseArgs.frequency_penalty = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'frequencyPenalty',\n details: 'frequencyPenalty is not supported for reasoning models',\n });\n }\n if (baseArgs.presence_penalty != null) {\n baseArgs.presence_penalty = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'presencePenalty',\n details: 'presencePenalty is not supported for reasoning models',\n });\n }\n if (baseArgs.logit_bias != null) {\n baseArgs.logit_bias = undefined;\n warnings.push({\n type: 'other',\n message: 'logitBias is not supported for reasoning models',\n });\n }\n if (baseArgs.logprobs != null) {\n baseArgs.logprobs = undefined;\n warnings.push({\n type: 'other',\n message: 'logprobs is not supported for reasoning models',\n });\n }\n if (baseArgs.top_logprobs != null) {\n baseArgs.top_logprobs = undefined;\n warnings.push({\n type: 'other',\n message: 'topLogprobs is not supported for reasoning models',\n });\n }\n\n // reasoning models use max_completion_tokens instead of max_tokens:\n if (baseArgs.max_tokens != null) {\n if (baseArgs.max_completion_tokens == null) {\n baseArgs.max_completion_tokens = baseArgs.max_tokens;\n }\n baseArgs.max_tokens = undefined;\n }\n } else if (\n this.modelId.startsWith('gpt-4o-search-preview') ||\n this.modelId.startsWith('gpt-4o-mini-search-preview')\n ) {\n if (baseArgs.temperature != null) {\n baseArgs.temperature = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'temperature',\n details:\n 'temperature is not supported for the search preview models and has been removed.',\n });\n }\n }\n\n // Validate flex processing support\n if (\n openaiOptions.serviceTier === 'flex' &&\n !supportsFlexProcessing(this.modelId)\n ) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'serviceTier',\n details: 'flex processing is only available for o3 and o4-mini models',\n });\n baseArgs.service_tier = undefined;\n }\n\n // Validate priority processing support\n if (\n openaiOptions.serviceTier === 'priority' &&\n !supportsPriorityProcessing(this.modelId)\n ) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'serviceTier',\n details:\n 'priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access',\n });\n baseArgs.service_tier = undefined;\n }\n\n const {\n tools: openaiTools,\n toolChoice: openaiToolChoice,\n toolWarnings,\n } = prepareTools({\n tools,\n toolChoice,\n structuredOutputs,\n strictJsonSchema,\n });\n\n return {\n args: {\n ...baseArgs,\n tools: openaiTools,\n tool_choice: openaiToolChoice,\n },\n warnings: [...warnings, ...toolWarnings],\n };\n }\n\n async doGenerate(\n options: Parameters<LanguageModelV2['doGenerate']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {\n const { args: body, warnings } = await this.getArgs(options);\n\n const {\n responseHeaders,\n value: response,\n rawValue: rawResponse,\n } = await postJsonToApi({\n url: this.config.url({\n path: '/chat/completions',\n modelId: this.modelId,\n }),\n headers: combineHeaders(this.config.headers(), options.headers),\n body,\n failedResponseHandler: openaiFailedResponseHandler,\n successfulResponseHandler: createJsonResponseHandler(\n openaiChatResponseSchema,\n ),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n const choice = response.choices[0];\n const content: Array<LanguageModelV2Content> = [];\n\n // text content:\n const text = choice.message.content;\n if (text != null && text.length > 0) {\n content.push({ type: 'text', text });\n }\n\n // tool calls:\n for (const toolCall of choice.message.tool_calls ?? []) {\n content.push({\n type: 'tool-call' as const,\n toolCallId: toolCall.id ?? generateId(),\n toolName: toolCall.function.name,\n input: toolCall.function.arguments!,\n });\n }\n\n // annotations/citations:\n for (const annotation of choice.message.annotations ?? []) {\n content.push({\n type: 'source',\n sourceType: 'url',\n id: generateId(),\n url: annotation.url,\n title: annotation.title,\n });\n }\n\n // provider metadata:\n const completionTokenDetails = response.usage?.completion_tokens_details;\n const promptTokenDetails = response.usage?.prompt_tokens_details;\n const providerMetadata: SharedV2ProviderMetadata = { openai: {} };\n if (completionTokenDetails?.accepted_prediction_tokens != null) {\n providerMetadata.openai.acceptedPredictionTokens =\n completionTokenDetails?.accepted_prediction_tokens;\n }\n if (completionTokenDetails?.rejected_prediction_tokens != null) {\n providerMetadata.openai.rejectedPredictionTokens =\n completionTokenDetails?.rejected_prediction_tokens;\n }\n if (choice.logprobs?.content != null) {\n providerMetadata.openai.logprobs = choice.logprobs.content;\n }\n\n return {\n content,\n finishReason: mapOpenAIFinishReason(choice.finish_reason),\n usage: {\n inputTokens: response.usage?.prompt_tokens ?? undefined,\n outputTokens: response.usage?.completion_tokens ?? undefined,\n totalTokens: response.usage?.total_tokens ?? undefined,\n reasoningTokens: completionTokenDetails?.reasoning_tokens ?? undefined,\n cachedInputTokens: promptTokenDetails?.cached_tokens ?? undefined,\n },\n request: { body },\n response: {\n ...getResponseMetadata(response),\n headers: responseHeaders,\n body: rawResponse,\n },\n warnings,\n providerMetadata,\n };\n }\n\n async doStream(\n options: Parameters<LanguageModelV2['doStream']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {\n const { args, warnings } = await this.getArgs(options);\n\n const body = {\n ...args,\n stream: true,\n stream_options: {\n include_usage: true,\n },\n };\n\n const { responseHeaders, value: response } = await postJsonToApi({\n url: this.config.url({\n path: '/chat/completions',\n modelId: this.modelId,\n }),\n headers: combineHeaders(this.config.headers(), options.headers),\n body,\n failedResponseHandler: openaiFailedResponseHandler,\n successfulResponseHandler: createEventSourceResponseHandler(\n openaiChatChunkSchema,\n ),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n const toolCalls: Array<{\n id: string;\n type: 'function';\n function: {\n name: string;\n arguments: string;\n };\n hasFinished: boolean;\n }> = [];\n\n let finishReason: LanguageModelV2FinishReason = 'unknown';\n const usage: LanguageModelV2Usage = {\n inputTokens: undefined,\n outputTokens: undefined,\n totalTokens: undefined,\n };\n let isFirstChunk = true;\n let isActiveText = false;\n\n const providerMetadata: SharedV2ProviderMetadata = { openai: {} };\n\n return {\n stream: response.pipeThrough(\n new TransformStream<\n ParseResult<z.infer<typeof openaiChatChunkSchema>>,\n LanguageModelV2StreamPart\n >({\n start(controller) {\n controller.enqueue({ type: 'stream-start', warnings });\n },\n\n transform(chunk, controller) {\n if (options.includeRawChunks) {\n controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });\n }\n\n // handle failed chunk parsing / validation:\n if (!chunk.success) {\n finishReason = 'error';\n controller.enqueue({ type: 'error', error: chunk.error });\n return;\n }\n\n const value = chunk.value;\n\n // handle error chunks:\n if ('error' in value) {\n finishReason = 'error';\n controller.enqueue({ type: 'error', error: value.error });\n return;\n }\n\n if (isFirstChunk) {\n isFirstChunk = false;\n\n controller.enqueue({\n type: 'response-metadata',\n ...getResponseMetadata(value),\n });\n }\n\n if (value.usage != null) {\n usage.inputTokens = value.usage.prompt_tokens ?? undefined;\n usage.outputTokens = value.usage.completion_tokens ?? undefined;\n usage.totalTokens = value.usage.total_tokens ?? undefined;\n usage.reasoningTokens =\n value.usage.completion_tokens_details?.reasoning_tokens ??\n undefined;\n usage.cachedInputTokens =\n value.usage.prompt_tokens_details?.cached_tokens ?? undefined;\n\n if (\n value.usage.completion_tokens_details\n ?.accepted_prediction_tokens != null\n ) {\n providerMetadata.openai.acceptedPredictionTokens =\n value.usage.completion_tokens_details?.accepted_prediction_tokens;\n }\n if (\n value.usage.completion_tokens_details\n ?.rejected_prediction_tokens != null\n ) {\n providerMetadata.openai.rejectedPredictionTokens =\n value.usage.completion_tokens_details?.rejected_prediction_tokens;\n }\n }\n\n const choice = value.choices[0];\n\n if (choice?.finish_reason != null) {\n finishReason = mapOpenAIFinishReason(choice.finish_reason);\n }\n\n if (choice?.logprobs?.content != null) {\n providerMetadata.openai.logprobs = choice.logprobs.content;\n }\n\n if (choice?.delta == null) {\n return;\n }\n\n const delta = choice.delta;\n\n if (delta.content != null) {\n if (!isActiveText) {\n controller.enqueue({ type: 'text-start', id: '0' });\n isActiveText = true;\n }\n\n controller.enqueue({\n type: 'text-delta',\n id: '0',\n delta: delta.content,\n });\n }\n\n if (delta.tool_calls != null) {\n for (const toolCallDelta of delta.tool_calls) {\n const index = toolCallDelta.index;\n\n // Tool call start. OpenAI returns all information except the arguments in the first chunk.\n if (toolCalls[index] == null) {\n if (toolCallDelta.type !== 'function') {\n throw new InvalidResponseDataError({\n data: toolCallDelta,\n message: `Expected 'function' type.`,\n });\n }\n\n if (toolCallDelta.id == null) {\n throw new InvalidResponseDataError({\n data: toolCallDelta,\n message: `Expected 'id' to be a string.`,\n });\n }\n\n if (toolCallDelta.function?.name == null) {\n throw new InvalidResponseDataError({\n data: toolCallDelta,\n message: `Expected 'function.name' to be a string.`,\n });\n }\n\n controller.enqueue({\n type: 'tool-input-start',\n id: toolCallDelta.id,\n toolName: toolCallDelta.function.name,\n });\n\n toolCalls[index] = {\n id: toolCallDelta.id,\n type: 'function',\n function: {\n name: toolCallDelta.function.name,\n arguments: toolCallDelta.function.arguments ?? '',\n },\n hasFinished: false,\n };\n\n const toolCall = toolCalls[index];\n\n if (\n toolCall.function?.name != null &&\n toolCall.function?.arguments != null\n ) {\n // send delta if the argument text has already started:\n if (toolCall.function.arguments.length > 0) {\n controller.enqueue({\n type: 'tool-input-delta',\n id: toolCall.id,\n delta: toolCall.function.arguments,\n });\n }\n\n // check if tool call is complete\n // (some providers send the full tool call in one chunk):\n if (isParsableJson(toolCall.function.arguments)) {\n controller.enqueue({\n type: 'tool-input-end',\n id: toolCall.id,\n });\n\n controller.enqueue({\n type: 'tool-call',\n toolCallId: toolCall.id ?? generateId(),\n toolName: toolCall.function.name,\n input: toolCall.function.arguments,\n });\n toolCall.hasFinished = true;\n }\n }\n\n continue;\n }\n\n // existing tool call, merge if not finished\n const toolCall = toolCalls[index];\n\n if (toolCall.hasFinished) {\n continue;\n }\n\n if (toolCallDelta.function?.arguments != null) {\n toolCall.function!.arguments +=\n toolCallDelta.function?.arguments ?? '';\n }\n\n // send delta\n controller.enqueue({\n type: 'tool-input-delta',\n id: toolCall.id,\n delta: toolCallDelta.function.arguments ?? '',\n });\n\n // check if tool call is complete\n if (\n toolCall.function?.name != null &&\n toolCall.function?.arguments != null &&\n isParsableJson(toolCall.function.arguments)\n ) {\n controller.enqueue({\n type: 'tool-input-end',\n id: toolCall.id,\n });\n\n controller.enqueue({\n type: 'tool-call',\n toolCallId: toolCall.id ?? generateId(),\n toolName: toolCall.function.name,\n input: toolCall.function.arguments,\n });\n toolCall.hasFinished = true;\n }\n }\n }\n\n // annotations/citations:\n if (delta.annotations != null) {\n for (const annotation of delta.annotations) {\n controller.enqueue({\n type: 'source',\n sourceType: 'url',\n id: generateId(),\n url: annotation.url,\n title: annotation.title,\n });\n }\n }\n },\n\n flush(controller) {\n if (isActiveText) {\n controller.enqueue({ type: 'text-end', id: '0' });\n }\n\n controller.enqueue({\n type: 'finish',\n finishReason,\n usage,\n ...(providerMetadata != null ? { providerMetadata } : {}),\n });\n },\n }),\n ),\n request: { body },\n response: { headers: responseHeaders },\n };\n }\n}\n\nconst openaiTokenUsageSchema = z\n .object({\n prompt_tokens: z.number().nullish(),\n completion_tokens: z.number().nullish(),\n total_tokens: z.number().nullish(),\n prompt_tokens_details: z\n .object({\n cached_tokens: z.number().nullish(),\n })\n .nullish(),\n completion_tokens_details: z\n .object({\n reasoning_tokens: z.number().nullish(),\n accepted_prediction_tokens: z.number().nullish(),\n rejected_prediction_tokens: z.number().nullish(),\n })\n .nullish(),\n })\n .nullish();\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst openaiChatResponseSchema = z.object({\n id: z.string().nullish(),\n created: z.number().nullish(),\n model: z.string().nullish(),\n choices: z.array(\n z.object({\n message: z.object({\n role: z.literal('assistant').nullish(),\n content: z.string().nullish(),\n tool_calls: z\n .array(\n z.object({\n id: z.string().nullish(),\n type: z.literal('function'),\n function: z.object({\n name: z.string(),\n arguments: z.string(),\n }),\n }),\n )\n .nullish(),\n annotations: z\n .array(\n z.object({\n type: z.literal('url_citation'),\n start_index: z.number(),\n end_index: z.number(),\n url: z.string(),\n title: z.string(),\n }),\n )\n .nullish(),\n }),\n index: z.number(),\n logprobs: z\n .object({\n content: z\n .array(\n z.object({\n token: z.string(),\n logprob: z.number(),\n top_logprobs: z.array(\n z.object({\n token: z.string(),\n logprob: z.number(),\n }),\n ),\n }),\n )\n .nullish(),\n })\n .nullish(),\n finish_reason: z.string().nullish(),\n }),\n ),\n usage: openaiTokenUsageSchema,\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst openaiChatChunkSchema = z.union([\n z.object({\n id: z.string().nullish(),\n created: z.number().nullish(),\n model: z.string().nullish(),\n choices: z.array(\n z.object({\n delta: z\n .object({\n role: z.enum(['assistant']).nullish(),\n content: z.string().nullish(),\n tool_calls: z\n .array(\n z.object({\n index: z.number(),\n id: z.string().nullish(),\n type: z.literal('function').nullish(),\n function: z.object({\n name: z.string().nullish(),\n arguments: z.string().nullish(),\n }),\n }),\n )\n .nullish(),\n annotations: z\n .array(\n z.object({\n type: z.literal('url_citation'),\n start_index: z.number(),\n end_index: z.number(),\n url: z.string(),\n title: z.string(),\n }),\n )\n .nullish(),\n })\n .nullish(),\n logprobs: z\n .object({\n content: z\n .array(\n z.object({\n token: z.string(),\n logprob: z.number(),\n top_logprobs: z.array(\n z.object({\n token: z.string(),\n logprob: z.number(),\n }),\n ),\n }),\n )\n .nullish(),\n })\n .nullish(),\n finish_reason: z.string().nullish(),\n index: z.number(),\n }),\n ),\n usage: openaiTokenUsageSchema,\n }),\n openaiErrorDataSchema,\n]);\n\nfunction isReasoningModel(modelId: string) {\n return modelId.startsWith('o');\n}\n\nfunction supportsFlexProcessing(modelId: string) {\n return modelId.startsWith('o3') || modelId.startsWith('o4-mini');\n}\n\nfunction supportsPriorityProcessing(modelId: string) {\n return (\n modelId.startsWith('gpt-4') ||\n modelId.startsWith('o3') ||\n modelId.startsWith('o4-mini')\n );\n}\n\nfunction getSystemMessageMode(modelId: string) {\n if (!isReasoningModel(modelId)) {\n return 'system';\n }\n\n return (\n reasoningModels[modelId as keyof typeof reasoningModels]\n ?.systemMessageMode ?? 'developer'\n );\n}\n\nconst reasoningModels = {\n 'o1-mini': {\n systemMessageMode: 'remove',\n },\n 'o1-mini-2024-09-12': {\n systemMessageMode: 'remove',\n },\n 'o1-preview': {\n systemMessageMode: 'remove',\n },\n 'o1-preview-2024-09-12': {\n systemMessageMode: 'remove',\n },\n o3: {\n systemMessageMode: 'developer',\n },\n 'o3-2025-04-16': {\n systemMessageMode: 'developer',\n },\n 'o3-mini': {\n systemMessageMode: 'developer',\n },\n 'o3-mini-2025-01-31': {\n systemMessageMode: 'developer',\n },\n 'o4-mini': {\n systemMessageMode: 'developer',\n },\n 'o4-mini-2025-04-16': {\n systemMessageMode: 'developer',\n },\n} as const;\n","import {\n LanguageModelV2CallWarning,\n LanguageModelV2Prompt,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { OpenAIChatPrompt } from './openai-chat-prompt';\nimport { convertToBase64 } from '@ai-sdk/provider-utils';\n\nexport function convertToOpenAIChatMessages({\n prompt,\n systemMessageMode = 'system',\n}: {\n prompt: LanguageModelV2Prompt;\n systemMessageMode?: 'system' | 'developer' | 'remove';\n}): {\n messages: OpenAIChatPrompt;\n warnings: Array<LanguageModelV2CallWarning>;\n} {\n const messages: OpenAIChatPrompt = [];\n const warnings: Array<LanguageModelV2CallWarning> = [];\n\n for (const { role, content } of prompt) {\n switch (role) {\n case 'system': {\n switch (systemMessageMode) {\n case 'system': {\n messages.push({ role: 'system', content });\n break;\n }\n case 'developer': {\n messages.push({ role: 'developer', content });\n break;\n }\n case 'remove': {\n warnings.push({\n type: 'other',\n message: 'system messages are removed for this model',\n });\n break;\n }\n default: {\n const _exhaustiveCheck: never = systemMessageMode;\n throw new Error(\n `Unsupported system message mode: ${_exhaustiveCheck}`,\n );\n }\n }\n break;\n }\n\n case 'user': {\n if (content.length === 1 && content[0].type === 'text') {\n messages.push({ role: 'user', content: content[0].text });\n break;\n }\n\n messages.push({\n role: 'user',\n content: content.map((part, index) => {\n switch (part.type) {\n case 'text': {\n return { type: 'text', text: part.text };\n }\n case 'file': {\n if (part.mediaType.startsWith('image/')) {\n const mediaType =\n part.mediaType === 'image/*'\n ? 'image/jpeg'\n : part.mediaType;\n\n return {\n type: 'image_url',\n image_url: {\n url:\n part.data instanceof URL\n ? part.data.toString()\n : `data:${mediaType};base64,${convertToBase64(part.data)}`,\n\n // OpenAI specific extension: image detail\n detail: part.providerOptions?.openai?.imageDetail,\n },\n };\n } else if (part.mediaType.startsWith('audio/')) {\n if (part.data instanceof URL) {\n throw new UnsupportedFunctionalityError({\n functionality: 'audio file parts with URLs',\n });\n }\n\n switch (part.mediaType) {\n case 'audio/wav': {\n return {\n type: 'input_audio',\n input_audio: {\n data: convertToBase64(part.data),\n format: 'wav',\n },\n };\n }\n case 'audio/mp3':\n case 'audio/mpeg': {\n return {\n type: 'input_audio',\n input_audio: {\n data: convertToBase64(part.data),\n format: 'mp3',\n },\n };\n }\n\n default: {\n throw new UnsupportedFunctionalityError({\n functionality: `audio content parts with media type ${part.mediaType}`,\n });\n }\n }\n } else if (part.mediaType === 'application/pdf') {\n if (part.data instanceof URL) {\n throw new UnsupportedFunctionalityError({\n functionality: 'PDF file parts with URLs',\n });\n }\n\n return {\n type: 'file',\n file: {\n filename: part.filename ?? `part-${index}.pdf`,\n file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`,\n },\n };\n } else {\n throw new UnsupportedFunctionalityError({\n functionality: `file part media type ${part.mediaType}`,\n });\n }\n }\n }\n }),\n });\n\n break;\n }\n\n case 'assistant': {\n let text = '';\n const toolCalls: Array<{\n id: string;\n type: 'function';\n function: { name: string; arguments: string };\n }> = [];\n\n for (const part of content) {\n switch (part.type) {\n case 'text': {\n text += part.text;\n break;\n }\n case 'tool-call': {\n toolCalls.push({\n id: part.toolCallId,\n type: 'function',\n function: {\n name: part.toolName,\n arguments: JSON.stringify(part.input),\n },\n });\n break;\n }\n }\n }\n\n messages.push({\n role: 'assistant',\n content: text,\n tool_calls: toolCalls.length > 0 ? toolCalls : undefined,\n });\n\n break;\n }\n\n case 'tool': {\n for (const toolResponse of content) {\n const output = toolResponse.output;\n\n let contentValue: string;\n switch (output.type) {\n case 'text':\n case 'error-text':\n contentValue = output.value;\n break;\n case 'content':\n case 'json':\n case 'error-json':\n contentValue = JSON.stringify(output.value);\n break;\n }\n\n messages.push({\n role: 'tool',\n tool_call_id: toolResponse.toolCallId,\n content: contentValue,\n });\n }\n break;\n }\n\n default: {\n const _exhaustiveCheck: never = role;\n throw new Error(`Unsupported role: ${_exhaustiveCheck}`);\n }\n }\n }\n\n return { messages, warnings };\n}\n","export function getResponseMetadata({\n id,\n model,\n created,\n}: {\n id?: string | undefined | null;\n created?: number | undefined | null;\n model?: string | undefined | null;\n}) {\n return {\n id: id ?? undefined,\n modelId: model ?? undefined,\n timestamp: created != null ? new Date(created * 1000) : undefined,\n };\n}\n","import { LanguageModelV2FinishReason } from '@ai-sdk/provider';\n\nexport function mapOpenAIFinishReason(\n finishReason: string | null | undefined,\n): LanguageModelV2FinishReason {\n switch (finishReason) {\n case 'stop':\n return 'stop';\n case 'length':\n return 'length';\n case 'content_filter':\n return 'content-filter';\n case 'function_call':\n case 'tool_calls':\n return 'tool-calls';\n default:\n return 'unknown';\n }\n}\n","import { z } from 'zod/v4';\n\n// https://platform.openai.com/docs/models\nexport type OpenAIChatModelId =\n | 'o1'\n | 'o1-2024-12-17'\n | 'o1-mini'\n | 'o1-mini-2024-09-12'\n | 'o1-preview'\n | 'o1-preview-2024-09-12'\n | 'o3-mini'\n | 'o3-mini-2025-01-31'\n | 'o3'\n | 'o3-2025-04-16'\n | 'o4-mini'\n | 'o4-mini-2025-04-16'\n | 'gpt-4.1'\n | 'gpt-4.1-2025-04-14'\n | 'gpt-4.1-mini'\n | 'gpt-4.1-mini-2025-04-14'\n | 'gpt-4.1-nano'\n | 'gpt-4.1-nano-2025-04-14'\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4o-2024-08-06'\n | 'gpt-4o-2024-11-20'\n | 'gpt-4o-audio-preview'\n | 'gpt-4o-audio-preview-2024-10-01'\n | 'gpt-4o-audio-preview-2024-12-17'\n | 'gpt-4o-search-preview'\n | 'gpt-4o-search-preview-2025-03-11'\n | 'gpt-4o-mini-search-preview'\n | 'gpt-4o-mini-search-preview-2025-03-11'\n | 'gpt-4o-mini'\n | 'gpt-4o-mini-2024-07-18'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-0125-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4'\n | 'gpt-4-0613'\n | 'gpt-4.5-preview'\n | 'gpt-4.5-preview-2025-02-27'\n | 'gpt-3.5-turbo-0125'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-1106'\n | 'chatgpt-4o-latest'\n | (string & {});\n\nexport const openaiProviderOptions = z.object({\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n *\n * Accepts a JSON object that maps tokens (specified by their token ID in\n * the GPT tokenizer) to an associated bias value from -100 to 100.\n */\n logitBias: z.record(z.coerce.number<string>(), z.number()).optional(),\n\n /**\n * Return the log probabilities of the tokens.\n *\n * Setting to true will return the log probabilities of the tokens that\n * were generated.\n *\n * Setting to a number will return the log probabilities of the top n\n * tokens that were generated.\n */\n logprobs: z.union([z.boolean(), z.number()]).optional(),\n\n /**\n * Whether to enable parallel function calling during tool use. Default to true.\n */\n parallelToolCalls: z.boolean().optional(),\n\n /**\n * A unique identifier representing your end-user, which can help OpenAI to\n * monitor and detect abuse.\n */\n user: z.string().optional(),\n\n /**\n * Reasoning effort for reasoning models. Defaults to `medium`.\n */\n reasoningEffort: z.enum(['low', 'medium', 'high']).optional(),\n\n /**\n * Maximum number of completion tokens to generate. Useful for reasoning models.\n */\n maxCompletionTokens: z.number().optional(),\n\n /**\n * Whether to enable persistence in responses API.\n */\n store: z.boolean().optional(),\n\n /**\n * Metadata to associate with the request.\n */\n metadata: z.record(z.string().max(64), z.string().max(512)).optional(),\n\n /**\n * Parameters for prediction mode.\n */\n prediction: z.record(z.string(), z.any()).optional(),\n\n /**\n * Whether to use structured outputs.\n *\n * @default true\n */\n structuredOutputs: z.boolean().optional(),\n\n /**\n * Service tier for the request.\n * - 'auto': Default service tier\n * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.\n * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.\n *\n * @default 'auto'\n */\n serviceTier: z.enum(['auto', 'flex', 'priority']).optional(),\n\n /**\n * Whether to use strict JSON schema validation.\n *\n * @default false\n */\n strictJsonSchema: z.boolean().optional(),\n});\n\nexport type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;\n","import { z } from 'zod/v4';\nimport { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';\n\nexport const openaiErrorDataSchema = z.object({\n error: z.object({\n message: z.string(),\n\n // The additional information below is handled loosely to support\n // OpenAI-compatible providers that have slightly different error\n // responses:\n type: z.string().nullish(),\n param: z.any().nullish(),\n code: z.union([z.string(), z.number()]).nullish(),\n }),\n});\n\nexport type OpenAIErrorData = z.infer<typeof openaiErrorDataSchema>;\n\nexport const openaiFailedResponseHandler = createJsonErrorResponseHandler({\n errorSchema: openaiErrorDataSchema,\n errorToMessage: data => data.error.message,\n});\n","import {\n LanguageModelV2CallOptions,\n LanguageModelV2CallWarning,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { OpenAITools, OpenAIToolChoice } from './openai-types';\nimport { fileSearchArgsSchema } from './tool/file-search';\nimport { webSearchPreviewArgsSchema } from './tool/web-search-preview';\n\nexport function prepareTools({\n tools,\n toolChoice,\n structuredOutputs,\n strictJsonSchema,\n}: {\n tools: LanguageModelV2CallOptions['tools'];\n toolChoice?: LanguageModelV2CallOptions['toolChoice'];\n structuredOutputs: boolean;\n strictJsonSchema: boolean;\n}): {\n tools?: OpenAITools;\n toolChoice?: OpenAIToolChoice;\n toolWarnings: Array<LanguageModelV2CallWarning>;\n} {\n // when the tools array is empty, change it to undefined to prevent errors:\n tools = tools?.length ? tools : undefined;\n\n const toolWarnings: LanguageModelV2CallWarning[] = [];\n\n if (tools == null) {\n return { tools: undefined, toolChoice: undefined, toolWarnings };\n }\n\n const openaiTools: OpenAITools = [];\n\n for (const tool of tools) {\n switch (tool.type) {\n case 'function':\n openaiTools.push({\n type: 'function',\n function: {\n name: tool.name,\n description: tool.description,\n parameters: tool.inputSchema,\n strict: structuredOutputs ? strictJsonSchema : undefined,\n },\n });\n break;\n case 'provider-defined':\n switch (tool.id) {\n case 'openai.file_search': {\n const args = fileSearchArgsSchema.parse(tool.args);\n openaiTools.push({\n type: 'file_search',\n vector_store_ids: args.vectorStoreIds,\n max_num_results: args.maxNumResults,\n ranking_options: args.ranking\n ? { ranker: args.ranking.ranker }\n : undefined,\n filters: args.filters,\n });\n break;\n }\n case 'openai.web_search_preview': {\n const args = webSearchPreviewArgsSchema.parse(tool.args);\n openaiTools.push({\n type: 'web_search_preview',\n search_context_size: args.searchContextSize,\n user_location: args.userLocation,\n });\n break;\n }\n default:\n toolWarnings.push({ type: 'unsupported-tool', tool });\n break;\n }\n break;\n default:\n toolWarnings.push({ type: 'unsupported-tool', tool });\n break;\n }\n }\n\n if (toolChoice == null) {\n return { tools: openaiTools, toolChoice: undefined, toolWarnings };\n }\n\n const type = toolChoice.type;\n\n switch (type) {\n case 'auto':\n case 'none':\n case 'required':\n return { tools: openaiTools, toolChoice: type, toolWarnings };\n case 'tool':\n return {\n tools: openaiTools,\n toolChoice: {\n type: 'function',\n function: {\n name: toolChoice.toolName,\n },\n },\n toolWarnings,\n };\n default: {\n const _exhaustiveCheck: never = type;\n throw new UnsupportedFunctionalityError({\n functionality: `tool choice type: ${_exhaustiveCheck}`,\n });\n }\n }\n}\n","import { createProviderDefinedToolFactory } from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\n\n// Filter schemas\nconst comparisonFilterSchema = z.object({\n key: z.string(),\n type: z.enum(['eq', 'ne', 'gt', 'gte', 'lt', 'lte']),\n value: z.union([z.string(), z.number(), z.boolean()]),\n});\n\nconst compoundFilterSchema: z.ZodType<any> = z.object({\n type: z.enum(['and', 'or']),\n filters: z.array(\n z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)]),\n ),\n});\n\nconst filtersSchema = z.union([comparisonFilterSchema, compoundFilterSchema]);\n\n// Args validation schema\nexport const fileSearchArgsSchema = z.object({\n /**\n * List of vector store IDs to search through. If not provided, searches all available vector stores.\n */\n vectorStoreIds: z.array(z.string()).optional(),\n\n /**\n * Maximum number of search results to return. Defaults to 10.\n */\n maxNumResults: z.number().optional(),\n\n /**\n * Ranking options for the search.\n */\n ranking: z\n .object({\n ranker: z.enum(['auto', 'default-2024-08-21']).optional(),\n })\n .optional(),\n\n /**\n * A filter to apply based on file attributes.\n */\n filters: filtersSchema.optional(),\n});\n\nexport const fileSearch = createProviderDefinedToolFactory<\n {\n /**\n * The search query to execute.\n */\n query: string;\n },\n {\n /**\n * List of vector store IDs to search through. If not provided, searches all available vector stores.\n */\n vectorStoreIds?: string[];\n\n /**\n * Maximum number of search results to return. Defaults to 10.\n */\n maxNumResults?: number;\n\n /**\n * Ranking options for the search.\n */\n ranking?: {\n ranker?: 'auto' | 'default-2024-08-21';\n };\n\n /**\n * A filter to apply based on file attributes.\n */\n filters?:\n | {\n key: string;\n type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';\n value: string | number | boolean;\n }\n | {\n type: 'and' | 'or';\n filters: any[];\n };\n }\n>({\n id: 'openai.file_search',\n name: 'file_search',\n inputSchema: z.object({\n query: z.string(),\n }),\n});\n","import { createProviderDefinedToolFactory } from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\n\n// Args validation schema\nexport const webSearchPreviewArgsSchema = z.object({\n /**\n * Search context size to use for the web search.\n * - high: Most comprehensive context, highest cost, slower response\n * - medium: Balanced context, cost, and latency (default)\n * - low: Least context, lowest cost, fastest response\n */\n searchContextSize: z.enum(['low', 'medium', 'high']).optional(),\n\n /**\n * User location information to provide geographically relevant search results.\n */\n userLocation: z\n .object({\n /**\n * Type of location (always 'approximate')\n */\n type: z.literal('approximate'),\n /**\n * Two-letter ISO country code (e.g., 'US', 'GB')\n */\n country: z.string().optional(),\n /**\n * City name (free text, e.g., 'Minneapolis')\n */\n city: z.string().optional(),\n /**\n * Region name (free text, e.g., 'Minnesota')\n */\n region: z.string().optional(),\n /**\n * IANA timezone (e.g., 'America/Chicago')\n */\n timezone: z.string().optional(),\n })\n .optional(),\n});\n\nexport const webSearchPreview = createProviderDefinedToolFactory<\n {\n // Web search doesn't take input parameters - it's controlled by the prompt\n },\n {\n /**\n * Search context size to use for the web search.\n * - high: Most comprehensive context, highest cost, slower response\n * - medium: Balanced context, cost, and latency (default)\n * - low: Least context, lowest cost, fastest response\n */\n searchContextSize?: 'low' | 'medium' | 'high';\n\n /**\n * User location information to provide geographically relevant search results.\n */\n userLocation?: {\n /**\n * Type of location (always 'approximate')\n */\n type: 'approximate';\n /**\n * Two-letter ISO country code (e.g., 'US', 'GB')\n */\n country?: string;\n /**\n * City name (free text, e.g., 'Minneapolis')\n */\n city?: string;\n /**\n * Region name (free text, e.g., 'Minnesota')\n */\n region?: string;\n /**\n * IANA timezone (e.g., 'America/Chicago')\n */\n timezone?: string;\n };\n }\n>({\n id: 'openai.web_search_preview',\n name: 'web_search_preview',\n inputSchema: z.object({}),\n});\n","import {\n LanguageModelV2,\n LanguageModelV2CallWarning,\n LanguageModelV2FinishReason,\n LanguageModelV2StreamPart,\n LanguageModelV2Usage,\n SharedV2ProviderMetadata,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n ParseResult,\n combineHeaders,\n createEventSourceResponseHandler,\n createJsonResponseHandler,\n parseProviderOptions,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { convertToOpenAICompletionPrompt } from './convert-to-openai-completion-prompt';\nimport { getResponseMetadata } from './get-response-metadata';\nimport { mapOpenAIFinishReason } from './map-openai-finish-reason';\nimport {\n OpenAICompletionModelId,\n openaiCompletionProviderOptions,\n} from './openai-completion-options';\nimport {\n openaiErrorDataSchema,\n openaiFailedResponseHandler,\n} from './openai-error';\n\ntype OpenAICompletionConfig = {\n provider: string;\n headers: () => Record<string, string | undefined>;\n url: (options: { m