/**
 * @ai-sdk/openai — TypeScript type declarations.
 *
 * The OpenAI provider (https://ai-sdk.dev/providers/ai-sdk-providers/openai)
 * for the AI SDK (https://ai-sdk.dev/docs) contains language model support
 * for the OpenAI chat and completion APIs and embedding model support for
 * the OpenAI embeddings API.
 */
import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
import { FetchFunction } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
/**
 * Known model ids for the OpenAI chat completion API. The trailing
 * `(string & {})` member keeps the union open to arbitrary ids (e.g. newly
 * released or fine-tuned models) while preserving editor autocompletion for
 * the listed ones.
 */
type OpenAIChatModelId =
  // o-series reasoning models
  | 'o1'
  | 'o1-2024-12-17'
  | 'o3-mini'
  | 'o3-mini-2025-01-31'
  | 'o3'
  | 'o3-2025-04-16'
  // gpt-4.1 family
  | 'gpt-4.1'
  | 'gpt-4.1-2025-04-14'
  | 'gpt-4.1-mini'
  | 'gpt-4.1-mini-2025-04-14'
  | 'gpt-4.1-nano'
  | 'gpt-4.1-nano-2025-04-14'
  // gpt-4o family
  | 'gpt-4o'
  | 'gpt-4o-2024-05-13'
  | 'gpt-4o-2024-08-06'
  | 'gpt-4o-2024-11-20'
  | 'gpt-4o-mini'
  | 'gpt-4o-mini-2024-07-18'
  // gpt-4 family
  | 'gpt-4-turbo'
  | 'gpt-4-turbo-2024-04-09'
  | 'gpt-4'
  | 'gpt-4-0613'
  | 'gpt-4.5-preview'
  | 'gpt-4.5-preview-2025-02-27'
  // gpt-3.5 family
  | 'gpt-3.5-turbo-0125'
  | 'gpt-3.5-turbo'
  | 'gpt-3.5-turbo-1106'
  | 'chatgpt-4o-latest'
  // gpt-5 family
  | 'gpt-5'
  | 'gpt-5-2025-08-07'
  | 'gpt-5-mini'
  | 'gpt-5-mini-2025-08-07'
  | 'gpt-5-nano'
  | 'gpt-5-nano-2025-08-07'
  | 'gpt-5-chat-latest'
  // escape hatch: any other model id still type-checks
  | (string & {});
/**
 * Known model ids for the OpenAI completion API; any other string is also
 * accepted via the open-ended `(string & {})` member.
 */
type OpenAICompletionModelId =
  | 'gpt-3.5-turbo-instruct'
  | (string & {});
/**
 * Known model ids for the OpenAI embeddings API; the `(string & {})` member
 * keeps the union open to other ids while retaining autocompletion.
 */
type OpenAIEmbeddingModelId =
  | 'text-embedding-3-small'
  | 'text-embedding-3-large'
  | 'text-embedding-ada-002'
  | (string & {});
/**
 * Known model ids for OpenAI image generation; other ids are permitted via
 * the open-ended `(string & {})` member.
 */
type OpenAIImageModelId =
  | 'gpt-image-1'
  | 'dall-e-3'
  | 'dall-e-2'
  | (string & {});
/**
 * OpenAI-specific provider-defined tools, exposed to callers via
 * `OpenAIProvider.tools`. Each entry is a `ProviderDefinedToolFactory<I, C>`;
 * NOTE(review): the first type parameter appears to be the tool-call input
 * shape and the second the tool's configuration options — confirm against
 * the @ai-sdk/provider-utils documentation.
 */
declare const openaiTools: {
  // Code-interpreter tool: no declared inputs; configurable with an optional
  // container, given either as a container id string or as a set of file ids.
  codeInterpreter: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
    container?: string | {
      fileIds?: string[];
    };
  }>;
  // File-search tool: takes a query string; configurable with vector-store
  // ids, a max result count, a ranker choice, and filters.
  fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
    query: string;
  }, {
    vectorStoreIds?: string[];
    maxNumResults?: number;
    ranking?: {
      ranker?: "auto" | "default-2024-08-21";
    };
    // Either a single comparison filter (key/type/value) or a compound
    // "and"/"or" filter whose nested `filters` array is typed `any[]` here,
    // losing recursion-level type safety.
    filters?: {
      key: string;
      type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte";
      value: string | number | boolean;
    } | {
      type: "and" | "or";
      filters: any[];
    };
  }>;
  // Web-search preview tool: no declared inputs; configurable with a search
  // context size and an approximate user location.
  webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
    searchContextSize?: "low" | "medium" | "high";
    userLocation?: {
      type: "approximate";
      country?: string;
      city?: string;
      region?: string;
      timezone?: string;
    };
  }>;
};
/**
 * Known model ids for the OpenAI responses API. The trailing `(string & {})`
 * member keeps the union open to arbitrary ids while preserving editor
 * autocompletion for the listed ones.
 */
type OpenAIResponsesModelId =
  // o-series reasoning models
  | 'o1'
  | 'o1-2024-12-17'
  | 'o3-mini'
  | 'o3-mini-2025-01-31'
  | 'o3'
  | 'o3-2025-04-16'
  // gpt-5 family
  | 'gpt-5'
  | 'gpt-5-2025-08-07'
  | 'gpt-5-mini'
  | 'gpt-5-mini-2025-08-07'
  | 'gpt-5-nano'
  | 'gpt-5-nano-2025-08-07'
  | 'gpt-5-chat-latest'
  // gpt-4.1 family
  | 'gpt-4.1'
  | 'gpt-4.1-2025-04-14'
  | 'gpt-4.1-mini'
  | 'gpt-4.1-mini-2025-04-14'
  | 'gpt-4.1-nano'
  | 'gpt-4.1-nano-2025-04-14'
  // gpt-4o family
  | 'gpt-4o'
  | 'gpt-4o-2024-05-13'
  | 'gpt-4o-2024-08-06'
  | 'gpt-4o-2024-11-20'
  | 'gpt-4o-mini'
  | 'gpt-4o-mini-2024-07-18'
  // gpt-4 family
  | 'gpt-4-turbo'
  | 'gpt-4-turbo-2024-04-09'
  | 'gpt-4'
  | 'gpt-4-0613'
  // gpt-3.5 family
  | 'gpt-3.5-turbo-0125'
  | 'gpt-3.5-turbo'
  | 'gpt-3.5-turbo-1106'
  | 'chatgpt-4o-latest'
  // escape hatch: any other model id still type-checks
  | (string & {});
/**
 * Known model ids for OpenAI speech generation; other ids are permitted via
 * the open-ended `(string & {})` member.
 */
type OpenAISpeechModelId =
  | 'tts-1'
  | 'tts-1-hd'
  | 'gpt-4o-mini-tts'
  | (string & {});
/**
 * Known model ids for OpenAI transcription; other ids are permitted via the
 * open-ended `(string & {})` member.
 */
type OpenAITranscriptionModelId =
  | 'whisper-1'
  | 'gpt-4o-mini-transcribe'
  | 'gpt-4o-transcribe'
  | (string & {});
/**
 * OpenAI provider for the AI SDK, giving access to language, embedding,
 * image, transcription and speech models. The provider object is itself
 * callable: `provider(modelId)` has the same signature as `languageModel`.
 */
interface OpenAIProvider extends ProviderV2 {
  /**
   * Calling the provider directly creates a language model; takes the same
   * model-id type as `languageModel`.
   */
  (modelId: OpenAIResponsesModelId): LanguageModelV2;
  /**
   * Creates an OpenAI model for text generation.
   * Note: accepts responses-API model ids, not chat-API ids.
   */
  languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
  /**
   * Creates an OpenAI chat model for text generation (chat completions API).
   */
  chat(modelId: OpenAIChatModelId): LanguageModelV2;
  /**
   * Creates an OpenAI responses API model for text generation.
   */
  responses(modelId: OpenAIResponsesModelId): LanguageModelV2;
  /**
   * Creates an OpenAI completion model for text generation.
   */
  completion(modelId: OpenAICompletionModelId): LanguageModelV2;
  /**
   * Creates a model for text embeddings.
   * Same signature as `textEmbeddingModel`; presumably an alias — confirm
   * in the implementation.
   */
  embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
   * Creates a model for text embeddings.
   * Same signature as `textEmbeddingModel`; presumably an alias — confirm
   * in the implementation.
   */
  textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
   * Creates a model for text embeddings.
   */
  textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
   * Creates a model for image generation.
   * Same signature as `imageModel`; presumably an alias — confirm in the
   * implementation.
   */
  image(modelId: OpenAIImageModelId): ImageModelV2;
  /**
   * Creates a model for image generation.
   */
  imageModel(modelId: OpenAIImageModelId): ImageModelV2;
  /**
   * Creates a model for transcription.
   */
  transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV2;
  /**
   * Creates a model for speech generation.
   */
  speech(modelId: OpenAISpeechModelId): SpeechModelV2;
  /**
   * OpenAI-specific provider-defined tools (code interpreter, file search,
   * web search preview).
   */
  tools: typeof openaiTools;
}
/**
 * Settings accepted when creating an OpenAI provider instance. Every field
 * is optional; unset fields fall back to the implementation's defaults,
 * which are not visible in this declaration file.
 */
interface OpenAIProviderSettings {
  /**
   * Base URL for the OpenAI API calls.
   * NOTE(review): presumably defaults to the public OpenAI endpoint —
   * confirm against the implementation.
   */
  baseURL?: string;
  /**
   * API key for authenticating requests.
   * NOTE(review): likely read from the environment when omitted — confirm
   * against the implementation.
   */
  apiKey?: string;
  /**
   * OpenAI Organization identifier.
   */
  organization?: string;
  /**
   * OpenAI project identifier.
   */
  project?: string;
  /**
   * Custom headers to include in the requests.
   */
  headers?: Record<string, string>;
  /**
   * Provider name. Overrides the `openai` default name for 3rd party
   * providers.
   */
  name?: string;
  /**
   * Custom fetch implementation. You can use it as a middleware to
   * intercept requests, or to provide a custom fetch implementation for
   * e.g. testing.
   */
  fetch?: FetchFunction;
}
/**
 * Create an OpenAI provider instance.
 *
 * @param options - Optional provider settings (API key, base URL, custom
 *   headers, custom fetch, …). When omitted, the implementation's defaults
 *   apply.
 * @returns A configured {@link OpenAIProvider}.
 */
declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
/**
 * Default OpenAI provider instance.
 * NOTE(review): presumably equivalent to `createOpenAI()` with no options —
 * confirm in the implementation.
 */
declare const openai: OpenAIProvider;
/**
 * Zod schema describing the provider-specific options accepted by OpenAI
 * responses-API models. Every field is optional and nullable; unknown keys
 * are stripped (`z.core.$strip`).
 */
declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
  parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  // NOTE(review): presumably chains a request to a prior response for
  // multi-turn state — inferred from the name; confirm.
  previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  // Free-form string here rather than an enum of effort levels.
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
    auto: "auto";
    flex: "flex";
    priority: "priority";
  }>>>;
  // Extra response payload sections to include in the API result.
  include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
    "reasoning.encrypted_content": "reasoning.encrypted_content";
    "file_search_call.results": "file_search_call.results";
    "message.output_text.logprobs": "message.output_text.logprobs";
  }>>>>;
  textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
    low: "low";
    medium: "medium";
    high: "high";
  }>>>;
  promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  // Boolean toggle or a top-N count.
  logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
}, z.core.$strip>;
/**
 * Provider options for OpenAI responses-API models, inferred from
 * {@link openaiResponsesProviderOptionsSchema}.
 */
type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };