@ai-sdk/openai
Version:
The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the OpenAI chat, completion, and responses APIs, embedding model support for the OpenAI embeddings API, and additional model support for image generation, transcription, and speech synthesis.
187 lines (174 loc) • 8.3 kB
TypeScript
import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
import { FetchFunction } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
/**
OpenAI-specific provider-defined tools, exposed to users via the
`tools` property of {@link OpenAIProvider}.
*/
declare const openaiTools: {
// File search tool. Tool input is the search `query`; the factory options
// select the vector stores to search (`vectorStoreIds`), cap the number of
// results, pick a ranker, and restrict matches via attribute `filters`
// (a single comparison, or an and/or combination of nested filters —
// `any[]` here presumably because the recursive type cannot be expressed
// in this generated declaration; confirm against the implementation).
fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
query: string;
}, {
vectorStoreIds?: string[];
maxNumResults?: number;
ranking?: {
ranker?: "auto" | "default-2024-08-21";
};
filters?: {
key: string;
type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte";
value: string | number | boolean;
} | {
type: "and" | "or";
filters: any[];
};
}>;
// Web search (preview) tool. Takes no tool input; the factory options
// control how much search context is retrieved and an approximate user
// location used to localize results.
webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
searchContextSize?: "low" | "medium" | "high";
userLocation?: {
type: "approximate";
country?: string;
city?: string;
region?: string;
timezone?: string;
};
}>;
};
type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
/**
Internal configuration passed to OpenAI model classes
(see {@link OpenAIResponsesLanguageModel}'s constructor).
*/
type OpenAIConfig = {
// Provider name reported by the model (see `OpenAIProviderSettings.name`).
provider: string;
// Builds the full request URL for a given model id and endpoint path.
url: (options: {
modelId: string;
path: string;
}) => string;
// Produces request headers; entries with `undefined` values are presumably
// dropped before sending — confirm against the implementation.
headers: () => Record<string, string | undefined>;
// Optional custom fetch implementation (middleware / testing).
fetch?: FetchFunction;
// Optional id generator override — presumably for deterministic ids in
// tests; confirm against the implementation.
generateId?: () => string;
};
declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12", "o1-preview", "o1-preview-2024-09-12", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview"];
type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
/**
Language model backed by the OpenAI Responses API, implementing the
AI SDK `LanguageModelV2` specification (`specificationVersion` "v2").
*/
declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2";
readonly modelId: OpenAIResponsesModelId;
private readonly config;
constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
// URL patterns the model can consume directly — presumably keyed by media
// type as required by LanguageModelV2; confirm against the spec.
readonly supportedUrls: Record<string, RegExp[]>;
// Provider name, taken from the config — TODO confirm.
get provider(): string;
// Internal: translates AI SDK call options into Responses API request arguments.
private getArgs;
/** Generates a complete response (non-streaming `LanguageModelV2` call). */
doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
/** Generates a streaming response (`LanguageModelV2` streaming call). */
doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
}
/**
Zod (v4) schema validating the OpenAI-specific provider options for the
Responses API. Every field is optional and nullable; see
{@link OpenAIResponsesProviderOptions} for the inferred plain type.
*/
declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
// Id of a previous response to continue from (multi-turn Responses API).
previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
// Free-form string here (not an enum) — presumably to stay forward-compatible
// with new effort levels; confirm against the implementation.
reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
// Service tier for request processing.
serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
auto: "auto";
flex: "flex";
priority: "priority";
}>>>;
// Extra response data to include (encrypted reasoning content, file-search results).
include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
"reasoning.encrypted_content": "reasoning.encrypted_content";
"file_search_call.results": "file_search_call.results";
}>>>>;
}, z.core.$strip>;
type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
/**
The OpenAI provider. A provider instance is callable (creating a language
model directly) and exposes factory methods for every OpenAI model family,
plus the OpenAI-specific {@link openaiTools}.
*/
interface OpenAIProvider extends ProviderV2 {
/**
Creates a language model for text generation — presumably equivalent to
calling `languageModel`; confirm against the implementation.
*/
(modelId: OpenAIResponsesModelId): LanguageModelV2;
/**
Creates an OpenAI model for text generation. Note the concrete
`OpenAIResponsesLanguageModel` return type: the default model class is
backed by the Responses API.
*/
languageModel(modelId: OpenAIResponsesModelId): OpenAIResponsesLanguageModel;
/**
Creates an OpenAI chat model for text generation (Chat Completions API).
*/
chat(modelId: OpenAIChatModelId): LanguageModelV2;
/**
Creates an OpenAI responses API model for text generation.
*/
responses(modelId: OpenAIResponsesModelId): LanguageModelV2;
/**
Creates an OpenAI completion model for text generation (legacy Completions API).
*/
completion(modelId: OpenAICompletionModelId): LanguageModelV2;
/**
Creates a model for text embeddings.
*/
embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
/**
Creates a model for text embeddings. Same signature as `embedding` —
presumably an alias kept for API compatibility.
*/
textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
/**
Creates a model for text embeddings. Same signature as `embedding` —
presumably the `ProviderV2`-conformant name.
*/
textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
/**
Creates a model for image generation.
*/
image(modelId: OpenAIImageModelId): ImageModelV2;
/**
Creates a model for image generation. Same signature as `image` —
presumably the `ProviderV2`-conformant name.
*/
imageModel(modelId: OpenAIImageModelId): ImageModelV2;
/**
Creates a model for transcription.
*/
transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV2;
/**
Creates a model for speech generation.
*/
speech(modelId: OpenAISpeechModelId): SpeechModelV2;
/**
OpenAI-specific tools (file search, web search preview).
*/
tools: typeof openaiTools;
}
/**
Settings for {@link createOpenAI}. All fields are optional; defaults are
applied by the implementation (not visible in this declaration file).
*/
interface OpenAIProviderSettings {
/**
Base URL for the OpenAI API calls. NOTE(review): default is presumably
the official OpenAI endpoint — confirm against the implementation.
*/
baseURL?: string;
/**
API key for authenticating requests. NOTE(review): when omitted, likely
read from an environment variable — confirm against the implementation.
*/
apiKey?: string;
/**
OpenAI Organization.
*/
organization?: string;
/**
OpenAI project.
*/
project?: string;
/**
Custom headers to include in the requests.
*/
headers?: Record<string, string>;
/**
Provider name. Overrides the `openai` default name for 3rd party providers.
*/
name?: string;
/**
Custom fetch implementation. You can use it as a middleware to intercept requests,
or to provide a custom fetch implementation for e.g. testing.
*/
fetch?: FetchFunction;
}
/**
Create an OpenAI provider instance.

@param options - Optional provider settings (base URL, API key,
organization/project, headers, provider name, custom fetch).
@returns A callable {@link OpenAIProvider} exposing factories for all
OpenAI model families.
*/
declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
/**
Default OpenAI provider instance — presumably created with default
settings, equivalent to `createOpenAI()`; confirm against the implementation.
*/
declare const openai: OpenAIProvider;
export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };