UNPKG

@ai-sdk/openai

Version:

The **[OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai)** for the [AI SDK](https://sdk.vercel.ai/docs) contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.

1,393 lines (1,375 loc) 44.5 kB
// src/openai-facade.ts import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; // src/openai-chat-language-model.ts import { InvalidResponseDataError, UnsupportedFunctionalityError as UnsupportedFunctionalityError2 } from "@ai-sdk/provider"; import { combineHeaders, createEventSourceResponseHandler, createJsonResponseHandler, generateId, isParsableJson, postJsonToApi } from "@ai-sdk/provider-utils"; import { z as z2 } from "zod"; // src/convert-to-openai-chat-messages.ts import { UnsupportedFunctionalityError } from "@ai-sdk/provider"; import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; function convertToOpenAIChatMessages({ prompt, useLegacyFunctionCalling = false }) { const messages = []; for (const { role, content } of prompt) { switch (role) { case "system": { messages.push({ role: "system", content }); break; } case "user": { if (content.length === 1 && content[0].type === "text") { messages.push({ role: "user", content: content[0].text }); break; } messages.push({ role: "user", content: content.map((part) => { var _a, _b, _c; switch (part.type) { case "text": { return { type: "text", text: part.text }; } case "image": { return { type: "image_url", image_url: { url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`, // OpenAI specific extension: image detail detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? 
void 0 : _c.imageDetail } }; } case "file": { throw new UnsupportedFunctionalityError({ functionality: "File content parts in user messages" }); } } }) }); break; } case "assistant": { let text = ""; const toolCalls = []; for (const part of content) { switch (part.type) { case "text": { text += part.text; break; } case "tool-call": { toolCalls.push({ id: part.toolCallId, type: "function", function: { name: part.toolName, arguments: JSON.stringify(part.args) } }); break; } default: { const _exhaustiveCheck = part; throw new Error(`Unsupported part: ${_exhaustiveCheck}`); } } } if (useLegacyFunctionCalling) { if (toolCalls.length > 1) { throw new UnsupportedFunctionalityError({ functionality: "useLegacyFunctionCalling with multiple tool calls in one message" }); } messages.push({ role: "assistant", content: text, function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0 }); } else { messages.push({ role: "assistant", content: text, tool_calls: toolCalls.length > 0 ? toolCalls : void 0 }); } break; } case "tool": { for (const toolResponse of content) { if (useLegacyFunctionCalling) { messages.push({ role: "function", name: toolResponse.toolName, content: JSON.stringify(toolResponse.result) }); } else { messages.push({ role: "tool", tool_call_id: toolResponse.toolCallId, content: JSON.stringify(toolResponse.result) }); } } break; } default: { const _exhaustiveCheck = role; throw new Error(`Unsupported role: ${_exhaustiveCheck}`); } } } return messages; } // src/map-openai-chat-logprobs.ts function mapOpenAIChatLogProbsOutput(logprobs) { var _a, _b; return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({ token, logprob, topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({ token: token2, logprob: logprob2 })) : [] }))) != null ? 
_b : void 0; } // src/map-openai-finish-reason.ts function mapOpenAIFinishReason(finishReason) { switch (finishReason) { case "stop": return "stop"; case "length": return "length"; case "content_filter": return "content-filter"; case "function_call": case "tool_calls": return "tool-calls"; default: return "unknown"; } } // src/openai-error.ts import { z } from "zod"; import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; var openAIErrorDataSchema = z.object({ error: z.object({ message: z.string(), // The additional information below is handled loosely to support // OpenAI-compatible providers that have slightly different error // responses: type: z.string().nullish(), param: z.any().nullish(), code: z.union([z.string(), z.number()]).nullish() }) }); var openaiFailedResponseHandler = createJsonErrorResponseHandler({ errorSchema: openAIErrorDataSchema, errorToMessage: (data) => data.error.message }); // src/get-response-metadata.ts function getResponseMetadata({ id, model, created }) { return { id: id != null ? id : void 0, modelId: model != null ? model : void 0, timestamp: created != null ? new Date(created * 1e3) : void 0 }; } // src/openai-chat-language-model.ts var OpenAIChatLanguageModel = class { constructor(modelId, settings, config) { this.specificationVersion = "v1"; this.modelId = modelId; this.settings = settings; this.config = config; } get supportsStructuredOutputs() { return this.settings.structuredOutputs === true; } get defaultObjectGenerationMode() { return this.supportsStructuredOutputs ? 
"json" : "tool"; } get provider() { return this.config.provider; } get supportsImageUrls() { return !this.settings.downloadImages; } getArgs({ mode, prompt, maxTokens, temperature, topP, topK, frequencyPenalty, presencePenalty, stopSequences, responseFormat, seed, providerMetadata }) { var _a, _b, _c; const type = mode.type; const warnings = []; if (topK != null) { warnings.push({ type: "unsupported-setting", setting: "topK" }); } if (responseFormat != null && responseFormat.type === "json" && responseFormat.schema != null) { warnings.push({ type: "unsupported-setting", setting: "responseFormat", details: "JSON response format schema is not supported" }); } const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling; if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) { throw new UnsupportedFunctionalityError2({ functionality: "useLegacyFunctionCalling with parallelToolCalls" }); } if (useLegacyFunctionCalling && this.settings.structuredOutputs === true) { throw new UnsupportedFunctionalityError2({ functionality: "structuredOutputs with useLegacyFunctionCalling" }); } const baseArgs = { // model id: model: this.modelId, // model specific settings: logit_bias: this.settings.logitBias, logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0, top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0, user: this.settings.user, parallel_tool_calls: this.settings.parallelToolCalls, // standardized settings: max_tokens: maxTokens, temperature, top_p: topP, frequency_penalty: frequencyPenalty, presence_penalty: presencePenalty, stop: stopSequences, seed, // openai specific settings: max_completion_tokens: (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.maxCompletionTokens) != null ? 
_b : void 0, // response format: response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0, // messages: messages: convertToOpenAIChatMessages({ prompt, useLegacyFunctionCalling }) }; if (isReasoningModel(this.modelId)) { baseArgs.temperature = void 0; baseArgs.top_p = void 0; baseArgs.frequency_penalty = void 0; baseArgs.presence_penalty = void 0; } switch (type) { case "regular": { return { args: { ...baseArgs, ...prepareToolsAndToolChoice({ mode, useLegacyFunctionCalling, structuredOutputs: this.settings.structuredOutputs }) }, warnings }; } case "object-json": { return { args: { ...baseArgs, response_format: this.settings.structuredOutputs === true ? { type: "json_schema", json_schema: { schema: mode.schema, strict: true, name: (_c = mode.name) != null ? _c : "response", description: mode.description } } : { type: "json_object" } }, warnings }; } case "object-tool": { return { args: useLegacyFunctionCalling ? { ...baseArgs, function_call: { name: mode.tool.name }, functions: [ { name: mode.tool.name, description: mode.tool.description, parameters: mode.tool.parameters } ] } : { ...baseArgs, tool_choice: { type: "function", function: { name: mode.tool.name } }, tools: [ { type: "function", function: { name: mode.tool.name, description: mode.tool.description, parameters: mode.tool.parameters, strict: this.settings.structuredOutputs === true ? 
true : void 0 } } ] }, warnings }; } default: { const _exhaustiveCheck = type; throw new Error(`Unsupported type: ${_exhaustiveCheck}`); } } } async doGenerate(options) { var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j; const { args, warnings } = this.getArgs(options); const { responseHeaders, value: response } = await postJsonToApi({ url: this.config.url({ path: "/chat/completions", modelId: this.modelId }), headers: combineHeaders(this.config.headers(), options.headers), body: args, failedResponseHandler: openaiFailedResponseHandler, successfulResponseHandler: createJsonResponseHandler( openAIChatResponseSchema ), abortSignal: options.abortSignal, fetch: this.config.fetch }); const { messages: rawPrompt, ...rawSettings } = args; const choice = response.choices[0]; const providerMetadata = ((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? { openai: { reasoningTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens } } : void 0; return { text: (_e = choice.message.content) != null ? _e : void 0, toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [ { toolCallType: "function", toolCallId: generateId(), toolName: choice.message.function_call.name, args: choice.message.function_call.arguments } ] : (_f = choice.message.tool_calls) == null ? void 0 : _f.map((toolCall) => { var _a2; return { toolCallType: "function", toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(), toolName: toolCall.function.name, args: toolCall.function.arguments }; }), finishReason: mapOpenAIFinishReason(choice.finish_reason), usage: { promptTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : NaN, completionTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? 
_j : NaN }, rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, response: getResponseMetadata(response), warnings, logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs), providerMetadata }; } async doStream(options) { if (isReasoningModel(this.modelId)) { const result = await this.doGenerate(options); const simulatedStream = new ReadableStream({ start(controller) { controller.enqueue({ type: "response-metadata", ...result.response }); if (result.text) { controller.enqueue({ type: "text-delta", textDelta: result.text }); } if (result.toolCalls) { for (const toolCall of result.toolCalls) { controller.enqueue({ type: "tool-call", ...toolCall }); } } controller.enqueue({ type: "finish", finishReason: result.finishReason, usage: result.usage, logprobs: result.logprobs, providerMetadata: result.providerMetadata }); controller.close(); } }); return { stream: simulatedStream, rawCall: result.rawCall, rawResponse: result.rawResponse, warnings: result.warnings }; } const { args, warnings } = this.getArgs(options); const { responseHeaders, value: response } = await postJsonToApi({ url: this.config.url({ path: "/chat/completions", modelId: this.modelId }), headers: combineHeaders(this.config.headers(), options.headers), body: { ...args, stream: true, // only include stream_options when in strict compatibility mode: stream_options: this.config.compatibility === "strict" ? 
{ include_usage: true } : void 0 }, failedResponseHandler: openaiFailedResponseHandler, successfulResponseHandler: createEventSourceResponseHandler( openaiChatChunkSchema ), abortSignal: options.abortSignal, fetch: this.config.fetch }); const { messages: rawPrompt, ...rawSettings } = args; const toolCalls = []; let finishReason = "unknown"; let usage = { promptTokens: void 0, completionTokens: void 0 }; let logprobs; let isFirstChunk = true; const { useLegacyFunctionCalling } = this.settings; return { stream: response.pipeThrough( new TransformStream({ transform(chunk, controller) { var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n; if (!chunk.success) { finishReason = "error"; controller.enqueue({ type: "error", error: chunk.error }); return; } const value = chunk.value; if ("error" in value) { finishReason = "error"; controller.enqueue({ type: "error", error: value.error }); return; } if (isFirstChunk) { isFirstChunk = false; controller.enqueue({ type: "response-metadata", ...getResponseMetadata(value) }); } if (value.usage != null) { usage = { promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0, completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0 }; } const choice = value.choices[0]; if ((choice == null ? void 0 : choice.finish_reason) != null) { finishReason = mapOpenAIFinishReason(choice.finish_reason); } if ((choice == null ? void 0 : choice.delta) == null) { return; } const delta = choice.delta; if (delta.content != null) { controller.enqueue({ type: "text-delta", textDelta: delta.content }); } const mappedLogprobs = mapOpenAIChatLogProbsOutput( choice == null ? void 0 : choice.logprobs ); if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) { if (logprobs === void 0) logprobs = []; logprobs.push(...mappedLogprobs); } const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? 
[ { type: "function", id: generateId(), function: delta.function_call, index: 0 } ] : delta.tool_calls; if (mappedToolCalls != null) { for (const toolCallDelta of mappedToolCalls) { const index = toolCallDelta.index; if (toolCalls[index] == null) { if (toolCallDelta.type !== "function") { throw new InvalidResponseDataError({ data: toolCallDelta, message: `Expected 'function' type.` }); } if (toolCallDelta.id == null) { throw new InvalidResponseDataError({ data: toolCallDelta, message: `Expected 'id' to be a string.` }); } if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) { throw new InvalidResponseDataError({ data: toolCallDelta, message: `Expected 'function.name' to be a string.` }); } toolCalls[index] = { id: toolCallDelta.id, type: "function", function: { name: toolCallDelta.function.name, arguments: (_d = toolCallDelta.function.arguments) != null ? _d : "" } }; const toolCall2 = toolCalls[index]; if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) { if (toolCall2.function.arguments.length > 0) { controller.enqueue({ type: "tool-call-delta", toolCallType: "function", toolCallId: toolCall2.id, toolName: toolCall2.function.name, argsTextDelta: toolCall2.function.arguments }); } if (isParsableJson(toolCall2.function.arguments)) { controller.enqueue({ type: "tool-call", toolCallType: "function", toolCallId: (_g = toolCall2.id) != null ? _g : generateId(), toolName: toolCall2.function.name, args: toolCall2.function.arguments }); } } continue; } const toolCall = toolCalls[index]; if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) { toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? 
_j : ""; } controller.enqueue({ type: "tool-call-delta", toolCallType: "function", toolCallId: toolCall.id, toolName: toolCall.function.name, argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : "" }); if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) { controller.enqueue({ type: "tool-call", toolCallType: "function", toolCallId: (_n = toolCall.id) != null ? _n : generateId(), toolName: toolCall.function.name, args: toolCall.function.arguments }); } } } }, flush(controller) { var _a, _b; controller.enqueue({ type: "finish", finishReason, logprobs, usage: { promptTokens: (_a = usage.promptTokens) != null ? _a : NaN, completionTokens: (_b = usage.completionTokens) != null ? _b : NaN } }); } }) ), rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, warnings }; } }; var openAITokenUsageSchema = z2.object({ prompt_tokens: z2.number().nullish(), completion_tokens: z2.number().nullish(), completion_tokens_details: z2.object({ reasoning_tokens: z2.number().nullish() }).nullish() }).nullish(); var openAIChatResponseSchema = z2.object({ id: z2.string().nullish(), created: z2.number().nullish(), model: z2.string().nullish(), choices: z2.array( z2.object({ message: z2.object({ role: z2.literal("assistant").nullish(), content: z2.string().nullish(), function_call: z2.object({ arguments: z2.string(), name: z2.string() }).nullish(), tool_calls: z2.array( z2.object({ id: z2.string().nullish(), type: z2.literal("function"), function: z2.object({ name: z2.string(), arguments: z2.string() }) }) ).nullish() }), index: z2.number(), logprobs: z2.object({ content: z2.array( z2.object({ token: z2.string(), logprob: z2.number(), top_logprobs: z2.array( z2.object({ token: z2.string(), logprob: z2.number() }) ) }) ).nullable() }).nullish(), finish_reason: z2.string().nullish() }) ), usage: openAITokenUsageSchema }); 
var openaiChatChunkSchema = z2.union([ z2.object({ id: z2.string().nullish(), created: z2.number().nullish(), model: z2.string().nullish(), choices: z2.array( z2.object({ delta: z2.object({ role: z2.enum(["assistant"]).nullish(), content: z2.string().nullish(), function_call: z2.object({ name: z2.string().optional(), arguments: z2.string().optional() }).nullish(), tool_calls: z2.array( z2.object({ index: z2.number(), id: z2.string().nullish(), type: z2.literal("function").optional(), function: z2.object({ name: z2.string().nullish(), arguments: z2.string().nullish() }) }) ).nullish() }).nullish(), logprobs: z2.object({ content: z2.array( z2.object({ token: z2.string(), logprob: z2.number(), top_logprobs: z2.array( z2.object({ token: z2.string(), logprob: z2.number() }) ) }) ).nullable() }).nullish(), finish_reason: z2.string().nullable().optional(), index: z2.number() }) ), usage: openAITokenUsageSchema }), openAIErrorDataSchema ]); function prepareToolsAndToolChoice({ mode, useLegacyFunctionCalling = false, structuredOutputs = false }) { var _a; const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? 
mode.tools : void 0; if (tools == null) { return { tools: void 0, tool_choice: void 0 }; } const toolChoice = mode.toolChoice; if (useLegacyFunctionCalling) { const mappedFunctions = tools.map((tool) => ({ name: tool.name, description: tool.description, parameters: tool.parameters })); if (toolChoice == null) { return { functions: mappedFunctions, function_call: void 0 }; } const type2 = toolChoice.type; switch (type2) { case "auto": case "none": case void 0: return { functions: mappedFunctions, function_call: void 0 }; case "required": throw new UnsupportedFunctionalityError2({ functionality: "useLegacyFunctionCalling and toolChoice: required" }); default: return { functions: mappedFunctions, function_call: { name: toolChoice.toolName } }; } } const mappedTools = tools.map((tool) => ({ type: "function", function: { name: tool.name, description: tool.description, parameters: tool.parameters, strict: structuredOutputs === true ? true : void 0 } })); if (toolChoice == null) { return { tools: mappedTools, tool_choice: void 0 }; } const type = toolChoice.type; switch (type) { case "auto": case "none": case "required": return { tools: mappedTools, tool_choice: type }; case "tool": return { tools: mappedTools, tool_choice: { type: "function", function: { name: toolChoice.toolName } } }; default: { const _exhaustiveCheck = type; throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`); } } } function isReasoningModel(modelId) { return modelId.startsWith("o1-"); } // src/openai-completion-language-model.ts import { UnsupportedFunctionalityError as UnsupportedFunctionalityError4 } from "@ai-sdk/provider"; import { combineHeaders as combineHeaders2, createEventSourceResponseHandler as createEventSourceResponseHandler2, createJsonResponseHandler as createJsonResponseHandler2, postJsonToApi as postJsonToApi2 } from "@ai-sdk/provider-utils"; import { z as z3 } from "zod"; // src/convert-to-openai-completion-prompt.ts import { InvalidPromptError, 
UnsupportedFunctionalityError as UnsupportedFunctionalityError3 } from "@ai-sdk/provider"; function convertToOpenAICompletionPrompt({ prompt, inputFormat, user = "user", assistant = "assistant" }) { if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") { return { prompt: prompt[0].content[0].text }; } let text = ""; if (prompt[0].role === "system") { text += `${prompt[0].content} `; prompt = prompt.slice(1); } for (const { role, content } of prompt) { switch (role) { case "system": { throw new InvalidPromptError({ message: "Unexpected system message in prompt: ${content}", prompt }); } case "user": { const userMessage = content.map((part) => { switch (part.type) { case "text": { return part.text; } case "image": { throw new UnsupportedFunctionalityError3({ functionality: "images" }); } } }).join(""); text += `${user}: ${userMessage} `; break; } case "assistant": { const assistantMessage = content.map((part) => { switch (part.type) { case "text": { return part.text; } case "tool-call": { throw new UnsupportedFunctionalityError3({ functionality: "tool-call messages" }); } } }).join(""); text += `${assistant}: ${assistantMessage} `; break; } case "tool": { throw new UnsupportedFunctionalityError3({ functionality: "tool messages" }); } default: { const _exhaustiveCheck = role; throw new Error(`Unsupported role: ${_exhaustiveCheck}`); } } } text += `${assistant}: `; return { prompt: text, stopSequences: [` ${user}:`] }; } // src/map-openai-completion-logprobs.ts function mapOpenAICompletionLogProbs(logprobs) { return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({ token, logprob: logprobs.token_logprobs[index], topLogprobs: logprobs.top_logprobs ? 
Object.entries(logprobs.top_logprobs[index]).map( ([token2, logprob]) => ({ token: token2, logprob }) ) : [] })); } // src/openai-completion-language-model.ts var OpenAICompletionLanguageModel = class { constructor(modelId, settings, config) { this.specificationVersion = "v1"; this.defaultObjectGenerationMode = void 0; this.modelId = modelId; this.settings = settings; this.config = config; } get provider() { return this.config.provider; } getArgs({ mode, inputFormat, prompt, maxTokens, temperature, topP, topK, frequencyPenalty, presencePenalty, stopSequences: userStopSequences, responseFormat, seed }) { var _a; const type = mode.type; const warnings = []; if (topK != null) { warnings.push({ type: "unsupported-setting", setting: "topK" }); } if (responseFormat != null && responseFormat.type !== "text") { warnings.push({ type: "unsupported-setting", setting: "responseFormat", details: "JSON response format is not supported." }); } const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat }); const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []]; const baseArgs = { // model id: model: this.modelId, // model specific settings: echo: this.settings.echo, logit_bias: this.settings.logitBias, logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0, suffix: this.settings.suffix, user: this.settings.user, // standardized settings: max_tokens: maxTokens, temperature, top_p: topP, frequency_penalty: frequencyPenalty, presence_penalty: presencePenalty, seed, // prompt: prompt: completionPrompt, // stop sequences: stop: stop.length > 0 ? stop : void 0 }; switch (type) { case "regular": { if ((_a = mode.tools) == null ? 
void 0 : _a.length) { throw new UnsupportedFunctionalityError4({ functionality: "tools" }); } if (mode.toolChoice) { throw new UnsupportedFunctionalityError4({ functionality: "toolChoice" }); } return { args: baseArgs, warnings }; } case "object-json": { throw new UnsupportedFunctionalityError4({ functionality: "object-json mode" }); } case "object-tool": { throw new UnsupportedFunctionalityError4({ functionality: "object-tool mode" }); } default: { const _exhaustiveCheck = type; throw new Error(`Unsupported type: ${_exhaustiveCheck}`); } } } async doGenerate(options) { const { args, warnings } = this.getArgs(options); const { responseHeaders, value: response } = await postJsonToApi2({ url: this.config.url({ path: "/completions", modelId: this.modelId }), headers: combineHeaders2(this.config.headers(), options.headers), body: args, failedResponseHandler: openaiFailedResponseHandler, successfulResponseHandler: createJsonResponseHandler2( openAICompletionResponseSchema ), abortSignal: options.abortSignal, fetch: this.config.fetch }); const { prompt: rawPrompt, ...rawSettings } = args; const choice = response.choices[0]; return { text: choice.text, usage: { promptTokens: response.usage.prompt_tokens, completionTokens: response.usage.completion_tokens }, finishReason: mapOpenAIFinishReason(choice.finish_reason), logprobs: mapOpenAICompletionLogProbs(choice.logprobs), rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, response: getResponseMetadata(response), warnings }; } async doStream(options) { const { args, warnings } = this.getArgs(options); const { responseHeaders, value: response } = await postJsonToApi2({ url: this.config.url({ path: "/completions", modelId: this.modelId }), headers: combineHeaders2(this.config.headers(), options.headers), body: { ...args, stream: true, // only include stream_options when in strict compatibility mode: stream_options: this.config.compatibility === "strict" ? 
{ include_usage: true } : void 0 }, failedResponseHandler: openaiFailedResponseHandler, successfulResponseHandler: createEventSourceResponseHandler2( openaiCompletionChunkSchema ), abortSignal: options.abortSignal, fetch: this.config.fetch }); const { prompt: rawPrompt, ...rawSettings } = args; let finishReason = "unknown"; let usage = { promptTokens: Number.NaN, completionTokens: Number.NaN }; let logprobs; let isFirstChunk = true; return { stream: response.pipeThrough( new TransformStream({ transform(chunk, controller) { if (!chunk.success) { finishReason = "error"; controller.enqueue({ type: "error", error: chunk.error }); return; } const value = chunk.value; if ("error" in value) { finishReason = "error"; controller.enqueue({ type: "error", error: value.error }); return; } if (isFirstChunk) { isFirstChunk = false; controller.enqueue({ type: "response-metadata", ...getResponseMetadata(value) }); } if (value.usage != null) { usage = { promptTokens: value.usage.prompt_tokens, completionTokens: value.usage.completion_tokens }; } const choice = value.choices[0]; if ((choice == null ? void 0 : choice.finish_reason) != null) { finishReason = mapOpenAIFinishReason(choice.finish_reason); } if ((choice == null ? void 0 : choice.text) != null) { controller.enqueue({ type: "text-delta", textDelta: choice.text }); } const mappedLogprobs = mapOpenAICompletionLogProbs( choice == null ? void 0 : choice.logprobs ); if (mappedLogprobs == null ? 
void 0 : mappedLogprobs.length) { if (logprobs === void 0) logprobs = []; logprobs.push(...mappedLogprobs); } }, flush(controller) { controller.enqueue({ type: "finish", finishReason, logprobs, usage }); } }) ), rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, warnings }; } }; var openAICompletionResponseSchema = z3.object({ id: z3.string().nullish(), created: z3.number().nullish(), model: z3.string().nullish(), choices: z3.array( z3.object({ text: z3.string(), finish_reason: z3.string(), logprobs: z3.object({ tokens: z3.array(z3.string()), token_logprobs: z3.array(z3.number()), top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable() }).nullish() }) ), usage: z3.object({ prompt_tokens: z3.number(), completion_tokens: z3.number() }) }); var openaiCompletionChunkSchema = z3.union([ z3.object({ id: z3.string().nullish(), created: z3.number().nullish(), model: z3.string().nullish(), choices: z3.array( z3.object({ text: z3.string(), finish_reason: z3.string().nullish(), index: z3.number(), logprobs: z3.object({ tokens: z3.array(z3.string()), token_logprobs: z3.array(z3.number()), top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable() }).nullish() }) ), usage: z3.object({ prompt_tokens: z3.number(), completion_tokens: z3.number() }).nullish() }), openAIErrorDataSchema ]); // src/openai-facade.ts var OpenAI = class { /** * Creates a new OpenAI provider instance. */ constructor(options = {}) { var _a, _b; this.baseURL = (_b = withoutTrailingSlash((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? 
_b : "https://api.openai.com/v1"; this.apiKey = options.apiKey; this.organization = options.organization; this.project = options.project; this.headers = options.headers; } get baseConfig() { return { organization: this.organization, baseURL: this.baseURL, headers: () => ({ Authorization: `Bearer ${loadApiKey({ apiKey: this.apiKey, environmentVariableName: "OPENAI_API_KEY", description: "OpenAI" })}`, "OpenAI-Organization": this.organization, "OpenAI-Project": this.project, ...this.headers }) }; } chat(modelId, settings = {}) { return new OpenAIChatLanguageModel(modelId, settings, { provider: "openai.chat", ...this.baseConfig, compatibility: "strict", url: ({ path }) => `${this.baseURL}${path}` }); } completion(modelId, settings = {}) { return new OpenAICompletionLanguageModel(modelId, settings, { provider: "openai.completion", ...this.baseConfig, compatibility: "strict", url: ({ path }) => `${this.baseURL}${path}` }); } }; // src/openai-provider.ts import { loadApiKey as loadApiKey2, withoutTrailingSlash as withoutTrailingSlash2 } from "@ai-sdk/provider-utils"; // src/openai-embedding-model.ts import { TooManyEmbeddingValuesForCallError } from "@ai-sdk/provider"; import { combineHeaders as combineHeaders3, createJsonResponseHandler as createJsonResponseHandler3, postJsonToApi as postJsonToApi3 } from "@ai-sdk/provider-utils"; import { z as z4 } from "zod"; var OpenAIEmbeddingModel = class { constructor(modelId, settings, config) { this.specificationVersion = "v1"; this.modelId = modelId; this.settings = settings; this.config = config; } get provider() { return this.config.provider; } get maxEmbeddingsPerCall() { var _a; return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048; } get supportsParallelCalls() { var _a; return (_a = this.settings.supportsParallelCalls) != null ? 
_a : true; } async doEmbed({ values, headers, abortSignal }) { if (values.length > this.maxEmbeddingsPerCall) { throw new TooManyEmbeddingValuesForCallError({ provider: this.provider, modelId: this.modelId, maxEmbeddingsPerCall: this.maxEmbeddingsPerCall, values }); } const { responseHeaders, value: response } = await postJsonToApi3({ url: this.config.url({ path: "/embeddings", modelId: this.modelId }), headers: combineHeaders3(this.config.headers(), headers), body: { model: this.modelId, input: values, encoding_format: "float", dimensions: this.settings.dimensions, user: this.settings.user }, failedResponseHandler: openaiFailedResponseHandler, successfulResponseHandler: createJsonResponseHandler3( openaiTextEmbeddingResponseSchema ), abortSignal, fetch: this.config.fetch }); return { embeddings: response.data.map((item) => item.embedding), usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0, rawResponse: { headers: responseHeaders } }; } }; var openaiTextEmbeddingResponseSchema = z4.object({ data: z4.array(z4.object({ embedding: z4.array(z4.number()) })), usage: z4.object({ prompt_tokens: z4.number() }).nullish() }); // src/openai-provider.ts function createOpenAI(options = {}) { var _a, _b, _c; const baseURL = (_b = withoutTrailingSlash2((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? _b : "https://api.openai.com/v1"; const compatibility = (_c = options.compatibility) != null ? 
_c : "compatible"; const getHeaders = () => ({ Authorization: `Bearer ${loadApiKey2({ apiKey: options.apiKey, environmentVariableName: "OPENAI_API_KEY", description: "OpenAI" })}`, "OpenAI-Organization": options.organization, "OpenAI-Project": options.project, ...options.headers }); const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, { provider: "openai.chat", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, compatibility, fetch: options.fetch }); const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, { provider: "openai.completion", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, compatibility, fetch: options.fetch }); const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, { provider: "openai.embedding", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, fetch: options.fetch }); const createLanguageModel = (modelId, settings) => { if (new.target) { throw new Error( "The OpenAI model function cannot be called with the new keyword." ); } if (modelId === "gpt-3.5-turbo-instruct") { return createCompletionModel( modelId, settings ); } return createChatModel(modelId, settings); }; const provider = function(modelId, settings) { return createLanguageModel(modelId, settings); }; provider.languageModel = createLanguageModel; provider.chat = createChatModel; provider.completion = createCompletionModel; provider.embedding = createEmbeddingModel; provider.textEmbedding = createEmbeddingModel; provider.textEmbeddingModel = createEmbeddingModel; return provider; } var openai = createOpenAI({ compatibility: "strict" // strict for OpenAI API }); export { OpenAI, createOpenAI, openai }; //# sourceMappingURL=index.mjs.map