@ai-sdk/openai

The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the OpenAI chat, completion, and responses APIs, embedding model support for the OpenAI embeddings API, and model support for OpenAI's image generation, transcription, and speech APIs.
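
For orientation, here is a minimal usage sketch. It assumes the `openai` default provider instance from the package's main entry point and the `generateText`/`embed` helpers from the core `ai` package, as documented in the AI SDK docs (neither appears in the declaration file below); the model ids come from the unions declared there, and the prompts are illustrative.

```ts
import { openai } from '@ai-sdk/openai';
import { generateText, embed } from 'ai';

// Language model: generate text with an id from OpenAIChatModelId.
const { text } = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Explain vector embeddings in two sentences.',
});

// Embedding model: embed a single string with an OpenAIEmbeddingModelId.
const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
});
```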

The package's TypeScript declarations (`.d.ts`):

```ts
import {
  LanguageModelV2,
  EmbeddingModelV2,
  ImageModelV2,
  TranscriptionModelV2CallOptions,
  TranscriptionModelV2,
  SpeechModelV2,
} from '@ai-sdk/provider';
import { FetchFunction } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

type OpenAIChatModelId =
  | 'o1' | 'o1-2024-12-17'
  | 'o1-mini' | 'o1-mini-2024-09-12'
  | 'o1-preview' | 'o1-preview-2024-09-12'
  | 'o3-mini' | 'o3-mini-2025-01-31'
  | 'o3' | 'o3-2025-04-16'
  | 'o4-mini' | 'o4-mini-2025-04-16'
  | 'gpt-4.1' | 'gpt-4.1-2025-04-14'
  | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14'
  | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14'
  | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20'
  | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17'
  | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11'
  | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11'
  | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18'
  | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview'
  | 'gpt-4-0125-preview' | 'gpt-4-1106-preview'
  | 'gpt-4' | 'gpt-4-0613'
  | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27'
  | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106'
  | 'chatgpt-4o-latest'
  | (string & {});

declare const openaiProviderOptions: z.ZodObject<{
  logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
  logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
  parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
  user: z.ZodOptional<z.ZodString>;
  reasoningEffort: z.ZodOptional<z.ZodEnum<{ low: "low"; medium: "medium"; high: "high" }>>;
  maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
  store: z.ZodOptional<z.ZodBoolean>;
  metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
  prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
  structuredOutputs: z.ZodOptional<z.ZodBoolean>;
  serviceTier: z.ZodOptional<z.ZodEnum<{ auto: "auto"; flex: "flex"; priority: "priority" }>>;
  strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
}, z.core.$strip>;
type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;

type OpenAIChatConfig = {
  provider: string;
  headers: () => Record<string, string | undefined>;
  url: (options: { modelId: string; path: string }) => string;
  fetch?: FetchFunction;
};

declare class OpenAIChatLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
  readonly modelId: OpenAIChatModelId;
  readonly supportedUrls: { 'image/*': RegExp[] };
  private readonly config;
  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
  get provider(): string;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
}

type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});

declare const openaiCompletionProviderOptions: z.ZodObject<{
  echo: z.ZodOptional<z.ZodBoolean>;
  logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
  suffix: z.ZodOptional<z.ZodString>;
  user: z.ZodOptional<z.ZodString>;
  logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
}, z.core.$strip>;
type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

type OpenAICompletionConfig = {
  provider: string;
  headers: () => Record<string, string | undefined>;
  url: (options: { modelId: string; path: string }) => string;
  fetch?: FetchFunction;
};

declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
  readonly modelId: OpenAICompletionModelId;
  private readonly config;
  private get providerOptionsName();
  constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
  get provider(): string;
  readonly supportedUrls: Record<string, RegExp[]>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
}

type OpenAIConfig = {
  provider: string;
  url: (options: { modelId: string; path: string }) => string;
  headers: () => Record<string, string | undefined>;
  fetch?: FetchFunction;
  generateId?: () => string;
};

type OpenAIEmbeddingModelId =
  | 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002'
  | (string & {});

declare const openaiEmbeddingProviderOptions: z.ZodObject<{
  dimensions: z.ZodOptional<z.ZodNumber>;
  user: z.ZodOptional<z.ZodString>;
}, z.core.$strip>;
type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;

declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
  readonly specificationVersion = "v2";
  readonly modelId: OpenAIEmbeddingModelId;
  readonly maxEmbeddingsPerCall = 2048;
  readonly supportsParallelCalls = true;
  private readonly config;
  get provider(): string;
  constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
  doEmbed({ values, headers, abortSignal, providerOptions }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
}

type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
declare const hasDefaultResponseFormat: Set<string>;

interface OpenAIImageModelConfig extends OpenAIConfig {
  _internal?: { currentDate?: () => Date };
}

declare class OpenAIImageModel implements ImageModelV2 {
  readonly modelId: OpenAIImageModelId;
  private readonly config;
  readonly specificationVersion = "v2";
  get maxImagesPerCall(): number;
  get provider(): string;
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
  doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
}

type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});

declare const openAITranscriptionProviderOptions: z.ZodObject<{
  include: z.ZodOptional<z.ZodArray<z.ZodString>>;
  language: z.ZodOptional<z.ZodString>;
  prompt: z.ZodOptional<z.ZodString>;
  temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
  timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{ word: "word"; segment: "segment" }>>>>;
}, z.core.$strip>;
type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;

type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
  providerOptions?: { openai?: OpenAITranscriptionProviderOptions };
};

interface OpenAITranscriptionModelConfig extends OpenAIConfig {
  _internal?: { currentDate?: () => Date };
}

declare class OpenAITranscriptionModel implements TranscriptionModelV2 {
  readonly modelId: OpenAITranscriptionModelId;
  private readonly config;
  readonly specificationVersion = "v2";
  get provider(): string;
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
  private getArgs;
  doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV2['doGenerate']>>>;
}

type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});

declare const OpenAIProviderOptionsSchema: z.ZodObject<{
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
}, z.core.$strip>;
type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;

interface OpenAISpeechModelConfig extends OpenAIConfig {
  _internal?: { currentDate?: () => Date };
}

declare class OpenAISpeechModel implements SpeechModelV2 {
  readonly modelId: OpenAISpeechModelId;
  private readonly config;
  readonly specificationVersion = "v2";
  get provider(): string;
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
  private getArgs;
  doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
}

declare const openaiResponsesModelIds: readonly [
  "gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14",
  "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14",
  "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20",
  "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17",
  "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11",
  "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11",
  "gpt-4o-mini", "gpt-4o-mini-2024-07-18",
  "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview",
  "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613",
  "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27",
  "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106",
  "chatgpt-4o-latest",
  "o1", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12",
  "o1-preview", "o1-preview-2024-09-12",
  "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16",
  "o4-mini", "o4-mini-2025-04-16",
  "codex-mini-latest", "computer-use-preview"
];
type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});

declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
  readonly modelId: OpenAIResponsesModelId;
  private readonly config;
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
  readonly supportedUrls: Record<string, RegExp[]>;
  get provider(): string;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
}

declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
  parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{ auto: "auto"; flex: "flex"; priority: "priority" }>>>;
  include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
    "reasoning.encrypted_content": "reasoning.encrypted_content";
    "file_search_call.results": "file_search_call.results";
  }>>>>;
}, z.core.$strip>;
type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

export {
  OpenAIChatLanguageModel,
  type OpenAIChatModelId,
  OpenAICompletionLanguageModel,
  type OpenAICompletionModelId,
  type OpenAICompletionProviderOptions,
  OpenAIEmbeddingModel,
  type OpenAIEmbeddingModelId,
  type OpenAIEmbeddingProviderOptions,
  OpenAIImageModel,
  type OpenAIImageModelId,
  type OpenAIProviderOptions,
  OpenAIResponsesLanguageModel,
  type OpenAIResponsesProviderOptions,
  type OpenAISpeechCallOptions,
  OpenAISpeechModel,
  type OpenAISpeechModelId,
  type OpenAITranscriptionCallOptions,
  OpenAITranscriptionModel,
  type OpenAITranscriptionModelId,
  type OpenAITranscriptionProviderOptions,
  hasDefaultResponseFormat,
  modelMaxImagesPerCall,
  openAITranscriptionProviderOptions,
  openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions,
  openaiProviderOptions,
};
```