# @ai-sdk/openai

The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API. As the bundled sources below show, it also ships Responses API, image, transcription, and speech models, plus OpenAI-specific tools.
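
A minimal usage sketch with the AI SDK's `generateText`; the model ID is an assumption (valid IDs are constrained by the `OpenAIResponsesModelId` type in the sources below):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// The default provider instance reads OPENAI_API_KEY from the environment.
// Calling openai(...) directly yields a Responses API language model.
const { text } = await generateText({
  model: openai('gpt-5'), // assumed model ID
  prompt: 'Write a one-sentence summary of what a source map is.',
});

console.log(text);
```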

The file viewed here is the package's bundled source map (a single minified line, 408 kB). Its `sourcesContent` array embeds the original TypeScript modules:

- `src/openai-provider.ts`: provider interface, settings, factory, and the default `openai` instance
- `src/chat/*`: Chat Completions language model plus option, tool-preparation, message/usage/finish-reason conversion, and API schema modules
- `src/completion/*`: legacy completions language model and its conversion/schema modules
- `src/embedding/*`, `src/image/*`, `src/speech/*`, `src/transcription/*`: embedding, image, speech, and transcription models
- `src/responses/*`: Responses API language model and its conversion/schema modules
- `src/tool/*` and `src/openai-tools.ts`: provider-defined tools (`apply-patch`, `code-interpreter`, `file-search`, `image-generation`, `local-shell`, `shell`, `web-search`, `web-search-preview`, `mcp`)
- `src/openai-error.ts`, `src/openai-language-model-capabilities.ts`, `src/version.ts`: shared error handling, model capability detection, and the version constant

The embedded `src/openai-provider.ts` defines the public surface. Reconstructed from the source map (imports elided; the repetitive model factories are condensed into a comment):

```ts
export interface OpenAIProvider extends ProviderV3 {
  (modelId: OpenAIResponsesModelId): LanguageModelV3;

  /** Creates an OpenAI model for text generation. */
  languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;

  /** Creates an OpenAI chat model for text generation. */
  chat(modelId: OpenAIChatModelId): LanguageModelV3;

  /** Creates an OpenAI responses API model for text generation. */
  responses(modelId: OpenAIResponsesModelId): LanguageModelV3;

  /** Creates an OpenAI completion model for text generation. */
  completion(modelId: OpenAICompletionModelId): LanguageModelV3;

  /** Creates a model for text embeddings. */
  embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
  /** Creates a model for text embeddings. */
  embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
  /** @deprecated Use `embedding` instead. */
  textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
  /** @deprecated Use `embeddingModel` instead. */
  textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;

  /** Creates a model for image generation. */
  image(modelId: OpenAIImageModelId): ImageModelV3;
  /** Creates a model for image generation. */
  imageModel(modelId: OpenAIImageModelId): ImageModelV3;

  /** Creates a model for transcription. */
  transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;

  /** Creates a model for speech generation. */
  speech(modelId: OpenAISpeechModelId): SpeechModelV3;

  /** OpenAI-specific tools. */
  tools: typeof openaiTools;
}

export interface OpenAIProviderSettings {
  /** Base URL for the OpenAI API calls. */
  baseURL?: string;
  /** API key for authenticating requests. */
  apiKey?: string;
  /** OpenAI Organization. */
  organization?: string;
  /** OpenAI project. */
  project?: string;
  /** Custom headers to include in the requests. */
  headers?: Record<string, string>;
  /** Provider name. Overrides the `openai` default name for 3rd party providers. */
  name?: string;
  /** Custom fetch implementation, e.g. as middleware or for testing. */
  fetch?: FetchFunction;
}

/** Create an OpenAI provider instance. */
export function createOpenAI(
  options: OpenAIProviderSettings = {},
): OpenAIProvider {
  const baseURL =
    withoutTrailingSlash(
      loadOptionalSetting({
        settingValue: options.baseURL,
        environmentVariableName: 'OPENAI_BASE_URL',
      }),
    ) ?? 'https://api.openai.com/v1';

  const providerName = options.name ?? 'openai';

  const getHeaders = () =>
    withUserAgentSuffix(
      {
        Authorization: `Bearer ${loadApiKey({
          apiKey: options.apiKey,
          environmentVariableName: 'OPENAI_API_KEY',
          description: 'OpenAI',
        })}`,
        'OpenAI-Organization': options.organization,
        'OpenAI-Project': options.project,
        ...options.headers,
      },
      `ai-sdk/openai/${VERSION}`,
    );

  // createChatModel, createCompletionModel, createEmbeddingModel,
  // createImageModel, createTranscriptionModel, and createSpeechModel all
  // follow the same pattern: new <ModelClass>(modelId, { provider:
  // `${providerName}.<kind>`, url: ({ path }) => `${baseURL}${path}`,
  // headers: getHeaders, fetch: options.fetch }). createResponsesModel
  // additionally passes fileIdPrefixes: ['file-'], and createLanguageModel
  // throws when called with `new` before delegating to it.

  const provider = function (modelId: OpenAIResponsesModelId) {
    return createLanguageModel(modelId);
  };

  provider.specificationVersion = 'v3' as const;
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.responses = createResponsesModel;
  provider.embedding = createEmbeddingModel;
  provider.embeddingModel = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.transcription = createTranscriptionModel;
  provider.transcriptionModel = createTranscriptionModel;
  provider.speech = createSpeechModel;
  provider.speechModel = createSpeechModel;
  provider.tools = openaiTools;

  return provider as OpenAIProvider;
}

/** Default OpenAI provider instance. */
export const openai = createOpenAI();
```
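
A hedged sketch of configuring a custom provider instance; every field comes from `OpenAIProviderSettings` above, while the concrete values are placeholders:

```ts
import { createOpenAI } from '@ai-sdk/openai';

// All fields are optional; apiKey falls back to OPENAI_API_KEY and
// baseURL to OPENAI_BASE_URL (then https://api.openai.com/v1).
const provider = createOpenAI({
  apiKey: process.env.MY_OPENAI_KEY, // placeholder variable name
  organization: 'my-org-id',         // sent as the OpenAI-Organization header
  project: 'my-project-id',          // sent as the OpenAI-Project header
  headers: { 'x-request-source': 'docs-example' }, // placeholder header
});

// Calling the instance directly (or via .languageModel) returns a
// Responses API model; .chat / .completion select the other APIs.
const model = provider('gpt-5'); // assumed model ID
```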
`src/chat/openai-chat-language-model.ts` implements `OpenAIChatLanguageModel` (specification version `v3`, with `http(s)` image URLs as supported file URLs). Its request builder, `getArgs`, does the heavy lifting:

- Parses `openai` provider options against `openaiChatLanguageModelOptions` and looks up model capabilities; `forceReasoning` can override reasoning-model detection.
- Converts the prompt via `convertToOpenAIChatMessages`, using the `developer` system-message mode for reasoning models. `topK` is rejected with an "unsupported" warning.
- Maps standardized settings to the wire format (`maxOutputTokens` to `max_tokens`; a `json` response format to `json_schema` with `strict` defaulting to true, or `json_object` when no schema is given; plus `stop`, `seed`, and the penalties) and passes OpenAI-specific options through (`logit_bias`, `logprobs`/`top_logprobs`, `user`, `parallel_tool_calls`, `max_completion_tokens`, `store`, `metadata`, `prediction`, `reasoning_effort`, `service_tier`, `verbosity`, prompt-cache and safety-identifier fields).
- For reasoning models, strips unsupported parameters with warnings: `temperature`, `topP`, and `logprobs` (unless `reasoningEffort` is `'none'` on a model that supports non-reasoning parameters), plus `frequencyPenalty`, `presencePenalty`, `logitBias`, and `topLogprobs`; `max_tokens` is remapped to `max_completion_tokens`. The `gpt-4o-search-preview` and `gpt-4o-mini-search-preview` models additionally drop `temperature`.
- Downgrades `serviceTier: 'flex'` (only o3, o4-mini, and gpt-5 models) and `serviceTier: 'priority'` (supported gpt-4, gpt-5, gpt-5-mini, o3, and o4-mini models with Enterprise access; not gpt-5-nano) to warnings when unsupported, and merges tool definitions from `prepareChatTools`.

`doGenerate` POSTs the arguments to `/chat/completions` and maps the first choice into AI SDK content: text, tool calls (with `function.arguments` as input), and `url_citation` annotations as URL sources. Accepted/rejected prediction token counts and logprobs land in `providerMetadata.openai`.

`doStream` re-sends the same body with `stream: true` and `stream_options: { include_usage: true }`, then pipes the event stream through a `TransformStream` that:

- emits `stream-start` with warnings, raw chunks when requested, and `error` parts for unparsable or error chunks;
- extracts response metadata once it appears (Azure may prepend a `prompt_filter_results` chunk that carries none);
- opens and closes a single text block (`text-start`, `text-delta`, `text-end`) around content deltas;
- assembles incremental tool calls: the first chunk for an index must carry `type: 'function'`, an `id`, and a `function.name` (otherwise `InvalidResponseDataError`) and emits `tool-input-start`; argument fragments are forwarded as `tool-input-delta`; once the accumulated arguments parse as JSON it emits `tool-input-end` plus a final `tool-call` and marks the call finished;
- forwards `url_citation` annotations as sources and finishes with the mapped finish reason and converted usage.

`src/openai-error.ts` defines a deliberately loose zod error schema (`{ error: { message, type?, param?, code? } }`) so that OpenAI-compatible providers with slightly different error bodies still parse, and wires it into `openaiFailedResponseHandler`.
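
A hedged sketch of passing chat-specific provider options; the option names (`reasoningEffort`, `serviceTier`) appear in the source above, while the model ID and values are assumptions:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Options under the 'openai' key are validated against
// openaiChatLanguageModelOptions before being mapped to the wire format.
const result = await generateText({
  model: openai.chat('o4-mini'), // assumed reasoning-model ID
  prompt: 'How many primes are there below 100?',
  providerOptions: {
    openai: {
      reasoningEffort: 'low', // forwarded as reasoning_effort
      serviceTier: 'flex',    // warned about and dropped on unsupported models
    },
  },
});
```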
`src/openai-language-model-capabilities.ts` centralizes model detection with prefix allowlists. Reconstructed from the embedded source:

```ts
export type OpenAILanguageModelCapabilities = {
  isReasoningModel: boolean;
  systemMessageMode: 'remove' | 'system' | 'developer';
  supportsFlexProcessing: boolean;
  supportsPriorityProcessing: boolean;
  /** Allow temperature, topP, logProbs when reasoningEffort is none. */
  supportsNonReasoningParameters: boolean;
};

export function getOpenAILanguageModelCapabilities(
  modelId: string,
): OpenAILanguageModelCapabilities {
  const supportsFlexProcessing =
    modelId.startsWith('o3') ||
    modelId.startsWith('o4-mini') ||
    (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));

  const supportsPriorityProcessing =
    modelId.startsWith('gpt-4') ||
    modelId.startsWith('gpt-5-mini') ||
    (modelId.startsWith('gpt-5') &&
      !modelId.startsWith('gpt-5-nano') &&
      !modelId.startsWith('gpt-5-chat')) ||
    modelId.startsWith('o3') ||
    modelId.startsWith('o4-mini');

  // Allowlist approach: only known reasoning models use the 'developer' role.
  // This prevents issues with fine-tuned, third-party, and custom models.
  const isReasoningModel =
    modelId.startsWith('o1') ||
    modelId.startsWith('o3') ||
    modelId.startsWith('o4-mini') ||
    modelId.startsWith('codex-mini') ||
    modelId.startsWith('computer-use-preview') ||
    (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));

  // GPT-5.1 and GPT-5.2 support temperature, topP, and logprobs when
  // reasoningEffort is 'none'.
  const supportsNonReasoningParameters =
    modelId.startsWith('gpt-5.1') || modelId.startsWith('gpt-5.2');

  const systemMessageMode = isReasoningModel ? 'developer' : 'system';

  return {
    supportsFlexProcessing,
    supportsPriorityProcessing,
    isReasoningModel,
    systemMessageMode,
    supportsNonReasoningParameters,
  };
}
```

`src/chat/convert-openai-chat-usage.ts` converts OpenAI's usage block into the AI SDK shape: `inputTokens.total` is `prompt_tokens`, `cacheRead` is `prompt_tokens_details.cached_tokens`, and `noCache` is their difference; `outputTokens.total` is `completion_tokens`, `reasoning` is `completion_tokens_details.reasoning_tokens`, and `text` is their difference. The raw block is preserved under `raw`, and a missing usage object yields all-`undefined` fields.
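
A worked example of that arithmetic, under an assumed usage payload:

```ts
// Hypothetical usage block from a /chat/completions response:
const usage = {
  prompt_tokens: 100,
  completion_tokens: 50,
  total_tokens: 150,
  prompt_tokens_details: { cached_tokens: 40 },
  completion_tokens_details: { reasoning_tokens: 20 },
};

// convertOpenAIChatUsage(usage) then yields:
// inputTokens:  { total: 100, noCache: 60 (100 - 40), cacheRead: 40, cacheWrite: undefined }
// outputTokens: { total: 50, text: 30 (50 - 20), reasoning: 20 }
// raw: usage
```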
`src/chat/convert-to-openai-chat-messages.ts` maps AI SDK prompts to the chat wire format:

- System messages follow `systemMessageMode`: kept as `system`, re-tagged as `developer`, or removed with a warning.
- User messages with a single text part become plain strings; otherwise parts are mapped individually: `image/*` files become `image_url` entries (a URL or a base64 data URL, with `image/*` defaulting to `image/jpeg` and the OpenAI-specific `imageDetail` extension), `audio/wav` and `audio/mp3`/`audio/mpeg` become `input_audio`, and `application/pdf` becomes a `file` part (`file_id` when the data is a string starting with `file-`, else a filename defaulting to `part-<index>.pdf` plus base64 `file_data`). URL-based audio or PDF data and any other media type raise `UnsupportedFunctionalityError`.
- Assistant messages concatenate text parts and serialize tool calls with `JSON.stringify(part.input)`.
- Tool messages emit one `role: 'tool'` message per tool output (`text`/`error-text` values verbatim, `execution-denied` as its reason or a default string, `content`/`json`/`error-json` JSON-stringified); tool-approval responses are skipped.

`src/chat/get-response-metadata.ts` maps `id`/`model`/`created` to `id`/`modelId`/`timestamp` (via `new Date(created * 1000)`), and `src/chat/map-openai-finish-reason.ts` maps `stop` to `stop`, `length` to `length`, `content_filter` to `content-filter`, `function_call`/`tool_calls` to `tool-calls`, and anything else to `other`.

`src/chat/openai-chat-api.ts` defines `OpenAIChatFunctionTool` and `OpenAIChatToolChoice` plus deliberately limited zod schemas for the response and streaming-chunk payloads (choices with message or delta content, tool calls, `url_citation` annotations, logprobs, finish reason, and detailed usage). The schemas are kept limited to reduce breakage when the API changes and to increase efficiency. The embedded excerpt ends partway through the streaming-chunk schema.
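
To round out the provider surface shown earlier, a hedged embeddings sketch; `embed` is the AI SDK helper and the model ID is an assumption:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

// openai.embedding(...) and openai.embeddingModel(...) are equivalent;
// textEmbedding / textEmbeddingModel are the deprecated aliases.
const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'), // assumed model ID
  value: 'sunny day at the beach',
});
```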