ai

AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript

1 line, 853 kB
{"version":3,"sources":["../src/index.ts","../src/generate-text/generate-text.ts","../src/error/index.ts","../src/error/invalid-argument-error.ts","../src/error/invalid-stream-part-error.ts","../src/error/invalid-tool-approval-error.ts","../src/error/invalid-tool-input-error.ts","../src/error/tool-call-not-found-for-approval-error.ts","../src/error/no-image-generated-error.ts","../src/error/no-object-generated-error.ts","../src/error/no-output-generated-error.ts","../src/error/no-speech-generated-error.ts","../src/error/no-such-tool-error.ts","../src/error/tool-call-repair-error.ts","../src/error/unsupported-model-version-error.ts","../src/prompt/invalid-data-content-error.ts","../src/prompt/invalid-message-role-error.ts","../src/prompt/message-conversion-error.ts","../src/util/retry-error.ts","../src/logger/log-warnings.ts","../src/model/resolve-model.ts","../src/util/log-v2-compatibility-warning.ts","../src/model/as-embedding-model-v3.ts","../src/model/as-image-model-v3.ts","../src/model/as-language-model-v3.ts","../src/model/as-speech-model-v3.ts","../src/model/as-transcription-model-v3.ts","../src/prompt/convert-to-language-model-prompt.ts","../src/util/detect-media-type.ts","../src/util/download/download.ts","../src/version.ts","../src/util/download/download-function.ts","../src/prompt/data-content.ts","../src/prompt/split-data-url.ts","../src/util/as-array.ts","../src/prompt/create-tool-model-output.ts","../src/prompt/prepare-call-settings.ts","../src/prompt/prepare-tools-and-tool-choice.ts","../src/util/is-non-empty-object.ts","../src/prompt/standardize-prompt.ts","../src/prompt/message.ts","../src/types/provider-metadata.ts","../src/types/json-value.ts","../src/prompt/content-part.ts","../src/prompt/wrap-gateway-error.ts","../src/telemetry/assemble-operation-name.ts","../src/telemetry/get-base-telemetry-attributes.ts","../src/telemetry/get-tracer.ts","../src/telemetry/noop-tracer.ts","../src/telemetry/record-span.ts","../src/telemetry/select-telemetry-attributes.ts","../src/telemetry/stringify-for-telemetry.ts","../src/types/usage.ts","../src/util/merge-objects.ts","../src/util/retry-with-exponential-backoff.ts","../src/util/prepare-retries.ts","../src/generate-text/collect-tool-approvals.ts","../src/generate-text/execute-tool-call.ts","../src/generate-text/extract-text-content.ts","../src/generate-text/generated-file.ts","../src/generate-text/is-approval-needed.ts","../src/generate-text/output.ts","../src/util/parse-partial-json.ts","../src/util/fix-json.ts","../src/generate-text/parse-tool-call.ts","../src/generate-text/step-result.ts","../src/generate-text/stop-condition.ts","../src/generate-text/to-response-messages.ts","../src/generate-text/stream-text.ts","../src/util/prepare-headers.ts","../src/text-stream/create-text-stream-response.ts","../src/util/write-to-server-response.ts","../src/text-stream/pipe-text-stream-to-response.ts","../src/ui-message-stream/json-to-sse-transform-stream.ts","../src/ui-message-stream/ui-message-stream-headers.ts","../src/ui-message-stream/create-ui-message-stream-response.ts","../src/ui-message-stream/get-response-ui-message-id.ts","../src/ui/process-ui-message-stream.ts","../src/ui-message-stream/ui-message-chunks.ts","../src/ui/ui-messages.ts","../src/ui-message-stream/handle-ui-message-stream-finish.ts","../src/ui-message-stream/pipe-ui-message-stream-to-response.ts","../src/util/async-iterable-stream.ts","../src/util/consume-stream.ts","../src/util/create-resolvable-promise.ts","../src/util/create-stitchable-stream.ts","../src/util/now.ts","
../src/generate-text/run-tools-transformation.ts","../src/agent/tool-loop-agent.ts","../src/ui-message-stream/create-ui-message-stream.ts","../src/ui-message-stream/read-ui-message-stream.ts","../src/ui/convert-to-model-messages.ts","../src/ui/validate-ui-messages.ts","../src/agent/create-agent-ui-stream.ts","../src/agent/create-agent-ui-stream-response.ts","../src/agent/pipe-agent-ui-stream-to-response.ts","../src/embed/embed.ts","../src/embed/embed-many.ts","../src/util/split-array.ts","../src/generate-image/generate-image.ts","../src/generate-image/index.ts","../src/generate-object/generate-object.ts","../src/generate-text/extract-reasoning-content.ts","../src/generate-object/output-strategy.ts","../src/generate-object/parse-and-validate-object-result.ts","../src/generate-object/validate-object-generation-input.ts","../src/generate-object/stream-object.ts","../src/util/cosine-similarity.ts","../src/util/data-url.ts","../src/util/is-deep-equal-data.ts","../src/util/serial-job-executor.ts","../src/util/simulate-readable-stream.ts","../src/generate-speech/generate-speech.ts","../src/generate-speech/generated-audio-file.ts","../src/generate-text/prune-messages.ts","../src/generate-text/smooth-stream.ts","../src/middleware/default-embedding-settings-middleware.ts","../src/middleware/default-settings-middleware.ts","../src/util/get-potential-start-index.ts","../src/middleware/extract-reasoning-middleware.ts","../src/middleware/simulate-streaming-middleware.ts","../src/middleware/add-tool-input-examples-middleware.ts","../src/middleware/wrap-language-model.ts","../src/middleware/wrap-embedding-model.ts","../src/model/as-provider-v3.ts","../src/middleware/wrap-provider.ts","../src/registry/custom-provider.ts","../src/registry/no-such-provider-error.ts","../src/registry/provider-registry.ts","../src/rerank/rerank.ts","../src/transcribe/transcribe.ts","../src/error/no-transcript-generated-error.ts","../src/ui/call-completion-api.ts","../src/ui/process-text-stream.ts","../src/ui/chat.ts","../src/ui/convert-file-list-to-file-ui-parts.ts","../src/ui/default-chat-transport.ts","../src/ui/http-chat-transport.ts","../src/ui/last-assistant-message-is-complete-with-approval-responses.ts","../src/ui/last-assistant-message-is-complete-with-tool-calls.ts","../src/ui/transform-text-to-ui-message-stream.ts","../src/ui/text-stream-chat-transport.ts"],"sourcesContent":["// re-exports:\nexport { createGateway, gateway, type GatewayModelId } from '@ai-sdk/gateway';\nexport {\n asSchema,\n createIdGenerator,\n dynamicTool,\n generateId,\n jsonSchema,\n parseJsonEventStream,\n tool,\n zodSchema,\n type FlexibleSchema,\n type IdGenerator,\n type InferSchema,\n type InferToolInput,\n type InferToolOutput,\n type Schema,\n type Tool,\n type ToolApprovalRequest,\n type ToolApprovalResponse,\n type ToolCallOptions,\n type ToolExecutionOptions,\n type ToolExecuteFunction,\n} from '@ai-sdk/provider-utils';\n\n// directory exports\nexport * from './agent';\nexport * from './embed';\nexport * from './error';\nexport * from './generate-image';\nexport * from './generate-object';\nexport * from './generate-speech';\nexport * from './generate-text';\nexport * from './logger';\nexport * from './middleware';\nexport * from './prompt';\nexport * from './registry';\nexport * from './rerank';\nexport * from './text-stream';\nexport * from './transcribe';\nexport * from './types';\nexport * from './ui';\nexport * from './ui-message-stream';\nexport * from './util';\n\n// telemetry types:\nexport type { TelemetrySettings } from 
'./telemetry/telemetry-settings';\n\n// import globals\nimport './global';\n","import {\n LanguageModelV3,\n LanguageModelV3Content,\n LanguageModelV3ToolCall,\n} from '@ai-sdk/provider';\nimport {\n createIdGenerator,\n getErrorMessage,\n IdGenerator,\n ProviderOptions,\n ToolApprovalResponse,\n withUserAgentSuffix,\n} from '@ai-sdk/provider-utils';\nimport { Tracer } from '@opentelemetry/api';\nimport { NoOutputGeneratedError } from '../error';\nimport { logWarnings } from '../logger/log-warnings';\nimport { resolveLanguageModel } from '../model/resolve-model';\nimport { ModelMessage } from '../prompt';\nimport { CallSettings } from '../prompt/call-settings';\nimport { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';\nimport { createToolModelOutput } from '../prompt/create-tool-model-output';\nimport { prepareCallSettings } from '../prompt/prepare-call-settings';\nimport { prepareToolsAndToolChoice } from '../prompt/prepare-tools-and-tool-choice';\nimport { Prompt } from '../prompt/prompt';\nimport { standardizePrompt } from '../prompt/standardize-prompt';\nimport { wrapGatewayError } from '../prompt/wrap-gateway-error';\nimport { ToolCallNotFoundForApprovalError } from '../error/tool-call-not-found-for-approval-error';\nimport { assembleOperationName } from '../telemetry/assemble-operation-name';\nimport { getBaseTelemetryAttributes } from '../telemetry/get-base-telemetry-attributes';\nimport { getTracer } from '../telemetry/get-tracer';\nimport { recordSpan } from '../telemetry/record-span';\nimport { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes';\nimport { stringifyForTelemetry } from '../telemetry/stringify-for-telemetry';\nimport { TelemetrySettings } from '../telemetry/telemetry-settings';\nimport { LanguageModel, ToolChoice } from '../types';\nimport {\n addLanguageModelUsage,\n asLanguageModelUsage,\n LanguageModelUsage,\n} from '../types/usage';\nimport { asArray } from '../util/as-array';\nimport { DownloadFunction } from '../util/download/download-function';\nimport { mergeObjects } from '../util/merge-objects';\nimport { prepareRetries } from '../util/prepare-retries';\nimport { VERSION } from '../version';\nimport { collectToolApprovals } from './collect-tool-approvals';\nimport { ContentPart } from './content-part';\nimport { executeToolCall } from './execute-tool-call';\nimport { extractTextContent } from './extract-text-content';\nimport { GenerateTextResult } from './generate-text-result';\nimport { DefaultGeneratedFile } from './generated-file';\nimport { isApprovalNeeded } from './is-approval-needed';\nimport { Output, text } from './output';\nimport { InferCompleteOutput } from './output-utils';\nimport { parseToolCall } from './parse-tool-call';\nimport { PrepareStepFunction } from './prepare-step';\nimport { ResponseMessage } from './response-message';\nimport { DefaultStepResult, StepResult } from './step-result';\nimport {\n isStopConditionMet,\n stepCountIs,\n StopCondition,\n} from './stop-condition';\nimport { toResponseMessages } from './to-response-messages';\nimport { ToolApprovalRequestOutput } from './tool-approval-request-output';\nimport { TypedToolCall } from './tool-call';\nimport { ToolCallRepairFunction } from './tool-call-repair-function';\nimport { TypedToolError } from './tool-error';\nimport { ToolOutput } from './tool-output';\nimport { TypedToolResult } from './tool-result';\nimport { ToolSet } from './tool-set';\n\nconst originalGenerateId = createIdGenerator({\n prefix: 'aitxt',\n 
size: 24,\n});\n\n/**\nCallback that is set using the `onStepFinish` option.\n\n@param stepResult - The result of the step.\n */\nexport type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (\n stepResult: StepResult<TOOLS>,\n) => Promise<void> | void;\n\n/**\nCallback that is set using the `onFinish` option.\n\n@param event - The event that is passed to the callback.\n */\nexport type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (\n event: StepResult<TOOLS> & {\n /**\n * Details for all steps.\n */\n readonly steps: StepResult<TOOLS>[];\n\n /**\n * Total usage for all steps. This is the sum of the usage of all steps.\n */\n readonly totalUsage: LanguageModelUsage;\n\n /**\n * Context that is passed into tool execution.\n *\n * Experimental (can break in patch releases).\n *\n * @default undefined\n */\n experimental_context: unknown;\n },\n) => PromiseLike<void> | void;\n\n/**\nGenerate a text and call tools for a given prompt using a language model.\n\nThis function does not stream the output. If you want to stream the output, use `streamText` instead.\n\n@param model - The language model to use.\n\n@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.\n@param toolChoice - The tool choice strategy. Default: 'auto'.\n\n@param system - A system message that will be part of the prompt.\n@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.\n@param messages - A list of messages. You can either use `prompt` or `messages` but not both.\n\n@param maxOutputTokens - Maximum number of tokens to generate.\n@param temperature - Temperature setting.\nThe value is passed through to the provider. The range depends on the provider and model.\nIt is recommended to set either `temperature` or `topP`, but not both.\n@param topP - Nucleus sampling.\nThe value is passed through to the provider. The range depends on the provider and model.\nIt is recommended to set either `temperature` or `topP`, but not both.\n@param topK - Only sample from the top K options for each subsequent token.\nUsed to remove \"long tail\" low probability responses.\nRecommended for advanced use cases only. You usually only need to use temperature.\n@param presencePenalty - Presence penalty setting.\nIt affects the likelihood of the model to repeat information that is already in the prompt.\nThe value is passed through to the provider. The range depends on the provider and model.\n@param frequencyPenalty - Frequency penalty setting.\nIt affects the likelihood of the model to repeatedly use the same words or phrases.\nThe value is passed through to the provider. The range depends on the provider and model.\n@param stopSequences - Stop sequences.\nIf set, the model will stop generating text when one of the stop sequences is generated.\n@param seed - The seed (integer) to use for random sampling.\nIf set and supported by the model, calls will generate deterministic results.\n\n@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.\n@param abortSignal - An optional abort signal that can be used to cancel the call.\n@param headers - Additional HTTP headers to be sent with the request. 
Only applicable for HTTP-based providers.\n\n@param experimental_generateMessageId - Generate a unique ID for each message.\n\n@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.\n@param onFinish - Callback that is called when all steps are finished and the response is complete.\n\n@returns\nA result object that contains the generated text, the results of the tool calls, and additional information.\n */\nexport async function generateText<\n TOOLS extends ToolSet,\n OUTPUT extends Output = Output<string, string>,\n>({\n model: modelArg,\n tools,\n toolChoice,\n system,\n prompt,\n messages,\n maxRetries: maxRetriesArg,\n abortSignal,\n headers,\n stopWhen = stepCountIs(1),\n experimental_output,\n output = experimental_output,\n experimental_telemetry: telemetry,\n providerOptions,\n experimental_activeTools,\n activeTools = experimental_activeTools,\n experimental_prepareStep,\n prepareStep = experimental_prepareStep,\n experimental_repairToolCall: repairToolCall,\n experimental_download: download,\n experimental_context,\n _internal: {\n generateId = originalGenerateId,\n currentDate = () => new Date(),\n } = {},\n onStepFinish,\n onFinish,\n ...settings\n}: CallSettings &\n Prompt & {\n /**\nThe language model to use.\n */\n model: LanguageModel;\n\n /**\nThe tools that the model can call. The model needs to support calling tools.\n*/\n tools?: TOOLS;\n\n /**\nThe tool choice strategy. Default: 'auto'.\n */\n toolChoice?: ToolChoice<NoInfer<TOOLS>>;\n\n /**\nCondition for stopping the generation when there are tool results in the last step.\nWhen the condition is an array, any of the conditions can be met to stop the generation.\n\n@default stepCountIs(1)\n */\n stopWhen?:\n | StopCondition<NoInfer<TOOLS>>\n | Array<StopCondition<NoInfer<TOOLS>>>;\n\n /**\nOptional telemetry configuration (experimental).\n */\n experimental_telemetry?: TelemetrySettings;\n\n /**\nAdditional provider-specific options. 
They are passed through\nto the provider from the AI SDK and enable provider-specific\nfunctionality that can be fully encapsulated in the provider.\n */\n providerOptions?: ProviderOptions;\n\n /**\n * @deprecated Use `activeTools` instead.\n */\n experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\nLimits the tools that are available for the model to call without\nchanging the tool call and result types in the result.\n */\n activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\nOptional specification for parsing structured outputs from the LLM response.\n */\n output?: OUTPUT;\n\n /**\nOptional specification for parsing structured outputs from the LLM response.\n\n@deprecated Use `output` instead.\n */\n experimental_output?: OUTPUT;\n\n /**\nCustom download function to use for URLs.\n\nBy default, files are downloaded if the model does not support the URL for the given media type.\n */\n experimental_download?: DownloadFunction | undefined;\n\n /**\n * @deprecated Use `prepareStep` instead.\n */\n experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\nOptional function that you can use to provide different settings for a step.\n */\n prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\nA function that attempts to repair a tool call that failed to parse.\n */\n experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;\n\n /**\n * Callback that is called when each step (LLM call) is finished, including intermediate steps.\n */\n onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;\n\n /**\n * Callback that is called when all steps are finished and the response is complete.\n */\n onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;\n\n /**\n * Context that is passed into tool execution.\n *\n * Experimental (can break in patch releases).\n *\n * @default undefined\n */\n experimental_context?: unknown;\n\n /**\n * Internal. For test use only. May change without notice.\n */\n _internal?: {\n generateId?: IdGenerator;\n currentDate?: () => Date;\n };\n }): Promise<GenerateTextResult<TOOLS, OUTPUT>> {\n const model = resolveLanguageModel(modelArg);\n const stopConditions = asArray(stopWhen);\n const { maxRetries, retry } = prepareRetries({\n maxRetries: maxRetriesArg,\n abortSignal,\n });\n\n const callSettings = prepareCallSettings(settings);\n\n const headersWithUserAgent = withUserAgentSuffix(\n headers ?? 
{},\n `ai/${VERSION}`,\n );\n\n const baseTelemetryAttributes = getBaseTelemetryAttributes({\n model,\n telemetry,\n headers: headersWithUserAgent,\n settings: { ...callSettings, maxRetries },\n });\n\n const initialPrompt = await standardizePrompt({\n system,\n prompt,\n messages,\n } as Prompt);\n\n const tracer = getTracer(telemetry);\n\n try {\n return await recordSpan({\n name: 'ai.generateText',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': model.provider,\n 'ai.model.id': model.modelId,\n // specific settings that only make sense on the outer level:\n 'ai.prompt': {\n input: () => JSON.stringify({ system, prompt, messages }),\n },\n },\n }),\n tracer,\n fn: async span => {\n const initialMessages = initialPrompt.messages;\n const responseMessages: Array<ResponseMessage> = [];\n\n const { approvedToolApprovals, deniedToolApprovals } =\n collectToolApprovals<TOOLS>({ messages: initialMessages });\n\n const localApprovedToolApprovals = approvedToolApprovals.filter(\n toolApproval => !toolApproval.toolCall.providerExecuted,\n );\n\n if (\n deniedToolApprovals.length > 0 ||\n localApprovedToolApprovals.length > 0\n ) {\n const toolOutputs = await executeTools({\n toolCalls: localApprovedToolApprovals.map(\n toolApproval => toolApproval.toolCall,\n ),\n tools: tools as TOOLS,\n tracer,\n telemetry,\n messages: initialMessages,\n abortSignal,\n experimental_context,\n });\n\n const toolContent: Array<any> = [];\n\n // add regular tool results for approved tool calls:\n for (const output of toolOutputs) {\n const modelOutput = await createToolModelOutput({\n toolCallId: output.toolCallId,\n input: output.input,\n tool: tools?.[output.toolName],\n output:\n output.type === 'tool-result' ? output.output : output.error,\n errorMode: output.type === 'tool-error' ? 
'json' : 'none',\n });\n\n toolContent.push({\n type: 'tool-result' as const,\n toolCallId: output.toolCallId,\n toolName: output.toolName,\n output: modelOutput,\n });\n }\n\n // add execution denied tool results for all denied tool approvals:\n for (const toolApproval of deniedToolApprovals) {\n toolContent.push({\n type: 'tool-result' as const,\n toolCallId: toolApproval.toolCall.toolCallId,\n toolName: toolApproval.toolCall.toolName,\n output: {\n type: 'execution-denied' as const,\n reason: toolApproval.approvalResponse.reason,\n // For provider-executed tools, include approvalId so provider can correlate\n ...(toolApproval.toolCall.providerExecuted && {\n providerOptions: {\n openai: {\n approvalId: toolApproval.approvalResponse.approvalId,\n },\n },\n }),\n },\n });\n }\n\n responseMessages.push({\n role: 'tool',\n content: toolContent,\n });\n }\n\n // Forward provider-executed approval responses to the provider\n const providerExecutedToolApprovals = [\n ...approvedToolApprovals,\n ...deniedToolApprovals,\n ].filter(toolApproval => toolApproval.toolCall.providerExecuted);\n\n if (providerExecutedToolApprovals.length > 0) {\n responseMessages.push({\n role: 'tool',\n content: providerExecutedToolApprovals.map(\n toolApproval =>\n ({\n type: 'tool-approval-response',\n approvalId: toolApproval.approvalResponse.approvalId,\n approved: toolApproval.approvalResponse.approved,\n reason: toolApproval.approvalResponse.reason,\n providerExecuted: true,\n }) satisfies ToolApprovalResponse,\n ),\n });\n }\n\n const callSettings = prepareCallSettings(settings);\n\n let currentModelResponse: Awaited<\n ReturnType<LanguageModelV3['doGenerate']>\n > & { response: { id: string; timestamp: Date; modelId: string } };\n let clientToolCalls: Array<TypedToolCall<TOOLS>> = [];\n let clientToolOutputs: Array<ToolOutput<TOOLS>> = [];\n const steps: GenerateTextResult<TOOLS, OUTPUT>['steps'] = [];\n\n // Track provider-executed tool calls that support deferred results\n // (e.g., code_execution in programmatic tool calling scenarios).\n // These tools may not return their results in the same turn as their call.\n const pendingDeferredToolCalls = new Map<\n string,\n { toolName: string }\n >();\n\n do {\n const stepInputMessages = [...initialMessages, ...responseMessages];\n\n const prepareStepResult = await prepareStep?.({\n model,\n steps,\n stepNumber: steps.length,\n messages: stepInputMessages,\n experimental_context,\n });\n\n const stepModel = resolveLanguageModel(\n prepareStepResult?.model ?? model,\n );\n\n const promptMessages = await convertToLanguageModelPrompt({\n prompt: {\n system: prepareStepResult?.system ?? initialPrompt.system,\n messages: prepareStepResult?.messages ?? stepInputMessages,\n },\n supportedUrls: await stepModel.supportedUrls,\n download,\n });\n\n experimental_context =\n prepareStepResult?.experimental_context ?? experimental_context;\n\n const { toolChoice: stepToolChoice, tools: stepTools } =\n await prepareToolsAndToolChoice({\n tools,\n toolChoice: prepareStepResult?.toolChoice ?? toolChoice,\n activeTools: prepareStepResult?.activeTools ?? 
activeTools,\n });\n\n currentModelResponse = await retry(() =>\n recordSpan({\n name: 'ai.generateText.doGenerate',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText.doGenerate',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': stepModel.provider,\n 'ai.model.id': stepModel.modelId,\n // prompt:\n 'ai.prompt.messages': {\n input: () => stringifyForTelemetry(promptMessages),\n },\n 'ai.prompt.tools': {\n // convert the language model level tools:\n input: () => stepTools?.map(tool => JSON.stringify(tool)),\n },\n 'ai.prompt.toolChoice': {\n input: () =>\n stepToolChoice != null\n ? JSON.stringify(stepToolChoice)\n : undefined,\n },\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.system': stepModel.provider,\n 'gen_ai.request.model': stepModel.modelId,\n 'gen_ai.request.frequency_penalty': settings.frequencyPenalty,\n 'gen_ai.request.max_tokens': settings.maxOutputTokens,\n 'gen_ai.request.presence_penalty': settings.presencePenalty,\n 'gen_ai.request.stop_sequences': settings.stopSequences,\n 'gen_ai.request.temperature':\n settings.temperature ?? undefined,\n 'gen_ai.request.top_k': settings.topK,\n 'gen_ai.request.top_p': settings.topP,\n },\n }),\n tracer,\n fn: async span => {\n const stepProviderOptions = mergeObjects(\n providerOptions,\n prepareStepResult?.providerOptions,\n );\n\n const result = await stepModel.doGenerate({\n ...callSettings,\n tools: stepTools,\n toolChoice: stepToolChoice,\n responseFormat: await output?.responseFormat,\n prompt: promptMessages,\n providerOptions: stepProviderOptions,\n abortSignal,\n headers: headersWithUserAgent,\n });\n\n // Fill in default values:\n const responseData = {\n id: result.response?.id ?? generateId(),\n timestamp: result.response?.timestamp ?? currentDate(),\n modelId: result.response?.modelId ?? stepModel.modelId,\n headers: result.response?.headers,\n body: result.response?.body,\n };\n\n // Add response information to the span:\n span.setAttributes(\n await selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason': result.finishReason.unified,\n 'ai.response.text': {\n output: () => extractTextContent(result.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(result.content);\n return toolCalls == null\n ? 
undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.id': responseData.id,\n 'ai.response.model': responseData.modelId,\n 'ai.response.timestamp':\n responseData.timestamp.toISOString(),\n 'ai.response.providerMetadata': JSON.stringify(\n result.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens': result.usage.inputTokens.total,\n 'ai.usage.completionTokens':\n result.usage.outputTokens.total,\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.response.finish_reasons': [\n result.finishReason.unified,\n ],\n 'gen_ai.response.id': responseData.id,\n 'gen_ai.response.model': responseData.modelId,\n 'gen_ai.usage.input_tokens':\n result.usage.inputTokens.total,\n 'gen_ai.usage.output_tokens':\n result.usage.outputTokens.total,\n },\n }),\n );\n\n return { ...result, response: responseData };\n },\n }),\n );\n\n // parse tool calls:\n const stepToolCalls: TypedToolCall<TOOLS>[] = await Promise.all(\n currentModelResponse.content\n .filter(\n (part): part is LanguageModelV3ToolCall =>\n part.type === 'tool-call',\n )\n .map(toolCall =>\n parseToolCall({\n toolCall,\n tools,\n repairToolCall,\n system,\n messages: stepInputMessages,\n }),\n ),\n );\n const toolApprovalRequests: Record<\n string,\n ToolApprovalRequestOutput<TOOLS>\n > = {};\n\n // notify the tools that the tool calls are available:\n for (const toolCall of stepToolCalls) {\n if (toolCall.invalid) {\n continue; // ignore invalid tool calls\n }\n\n const tool = tools?.[toolCall.toolName];\n\n if (tool == null) {\n // ignore tool calls for tools that are not available,\n // e.g. provider-executed dynamic tools\n continue;\n }\n\n if (tool?.onInputAvailable != null) {\n await tool.onInputAvailable({\n input: toolCall.input,\n toolCallId: toolCall.toolCallId,\n messages: stepInputMessages,\n abortSignal,\n experimental_context,\n });\n }\n\n if (\n await isApprovalNeeded({\n tool,\n toolCall,\n messages: stepInputMessages,\n experimental_context,\n })\n ) {\n toolApprovalRequests[toolCall.toolCallId] = {\n type: 'tool-approval-request',\n approvalId: generateId(),\n toolCall,\n };\n }\n }\n\n // insert error tool outputs for invalid tool calls:\n // TODO AI SDK 6: invalid inputs should not require output parts\n const invalidToolCalls = stepToolCalls.filter(\n toolCall => toolCall.invalid && toolCall.dynamic,\n );\n\n clientToolOutputs = [];\n\n for (const toolCall of invalidToolCalls) {\n clientToolOutputs.push({\n type: 'tool-error',\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.input,\n error: getErrorMessage(toolCall.error!),\n dynamic: true,\n });\n }\n\n // execute client tool calls:\n clientToolCalls = stepToolCalls.filter(\n toolCall => !toolCall.providerExecuted,\n );\n\n if (tools != null) {\n clientToolOutputs.push(\n ...(await executeTools({\n toolCalls: clientToolCalls.filter(\n toolCall =>\n !toolCall.invalid &&\n toolApprovalRequests[toolCall.toolCallId] == null,\n ),\n tools,\n tracer,\n telemetry,\n messages: stepInputMessages,\n abortSignal,\n experimental_context,\n })),\n );\n }\n\n // Track provider-executed tool calls that support deferred results.\n // In programmatic tool calling, a server tool (e.g., code_execution) may\n // trigger a client tool, and the server tool's result is deferred until\n // the client tool's result is sent back.\n for (const toolCall of stepToolCalls) {\n if (!toolCall.providerExecuted) continue;\n const tool = tools?.[toolCall.toolName];\n if (tool?.type === 
'provider' && tool.supportsDeferredResults) {\n // Check if this tool call already has a result in the current response\n const hasResultInResponse = currentModelResponse.content.some(\n part =>\n part.type === 'tool-result' &&\n part.toolCallId === toolCall.toolCallId,\n );\n if (!hasResultInResponse) {\n pendingDeferredToolCalls.set(toolCall.toolCallId, {\n toolName: toolCall.toolName,\n });\n }\n }\n }\n\n // Mark deferred tool calls as resolved when we receive their results\n for (const part of currentModelResponse.content) {\n if (part.type === 'tool-result') {\n pendingDeferredToolCalls.delete(part.toolCallId);\n }\n }\n\n // content:\n const stepContent = asContent({\n content: currentModelResponse.content,\n toolCalls: stepToolCalls,\n toolOutputs: clientToolOutputs,\n toolApprovalRequests: Object.values(toolApprovalRequests),\n tools,\n });\n\n // append to messages for potential next step:\n responseMessages.push(\n ...(await toResponseMessages({\n content: stepContent,\n tools,\n })),\n );\n\n // Add step information (after response messages are updated):\n const currentStepResult: StepResult<TOOLS> = new DefaultStepResult({\n content: stepContent,\n finishReason: currentModelResponse.finishReason.unified,\n rawFinishReason: currentModelResponse.finishReason.raw,\n usage: asLanguageModelUsage(currentModelResponse.usage),\n warnings: currentModelResponse.warnings,\n providerMetadata: currentModelResponse.providerMetadata,\n request: currentModelResponse.request ?? {},\n response: {\n ...currentModelResponse.response,\n // deep clone msgs to avoid mutating past messages in multi-step:\n messages: structuredClone(responseMessages),\n },\n });\n\n logWarnings({\n warnings: currentModelResponse.warnings ?? [],\n provider: stepModel.provider,\n model: stepModel.modelId,\n });\n\n steps.push(currentStepResult);\n await onStepFinish?.(currentStepResult);\n } while (\n // Continue if:\n // 1. There are client tool calls that have all been executed, OR\n // 2. There are pending deferred results from provider-executed tools\n ((clientToolCalls.length > 0 &&\n clientToolOutputs.length === clientToolCalls.length) ||\n pendingDeferredToolCalls.size > 0) &&\n // continue until a stop condition is met:\n !(await isStopConditionMet({ stopConditions, steps }))\n );\n\n // Add response information to the span:\n span.setAttributes(\n await selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason':\n currentModelResponse.finishReason.unified,\n 'ai.response.text': {\n output: () => extractTextContent(currentModelResponse.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(currentModelResponse.content);\n return toolCalls == null\n ? 
undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.providerMetadata': JSON.stringify(\n currentModelResponse.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens':\n currentModelResponse.usage.inputTokens.total,\n 'ai.usage.completionTokens':\n currentModelResponse.usage.outputTokens.total,\n },\n }),\n );\n\n const lastStep = steps[steps.length - 1];\n\n const totalUsage = steps.reduce(\n (totalUsage, step) => {\n return addLanguageModelUsage(totalUsage, step.usage);\n },\n {\n inputTokens: undefined,\n outputTokens: undefined,\n totalTokens: undefined,\n reasoningTokens: undefined,\n cachedInputTokens: undefined,\n } as LanguageModelUsage,\n );\n\n await onFinish?.({\n finishReason: lastStep.finishReason,\n rawFinishReason: lastStep.rawFinishReason,\n usage: lastStep.usage,\n content: lastStep.content,\n text: lastStep.text,\n reasoningText: lastStep.reasoningText,\n reasoning: lastStep.reasoning,\n files: lastStep.files,\n sources: lastStep.sources,\n toolCalls: lastStep.toolCalls,\n staticToolCalls: lastStep.staticToolCalls,\n dynamicToolCalls: lastStep.dynamicToolCalls,\n toolResults: lastStep.toolResults,\n staticToolResults: lastStep.staticToolResults,\n dynamicToolResults: lastStep.dynamicToolResults,\n request: lastStep.request,\n response: lastStep.response,\n warnings: lastStep.warnings,\n providerMetadata: lastStep.providerMetadata,\n steps,\n totalUsage,\n experimental_context,\n });\n\n // parse output only if the last step was finished with \"stop\":\n let resolvedOutput;\n if (lastStep.finishReason === 'stop') {\n const outputSpecification = output ?? text();\n resolvedOutput = await outputSpecification.parseCompleteOutput(\n { text: lastStep.text },\n {\n response: lastStep.response,\n usage: lastStep.usage,\n finishReason: lastStep.finishReason,\n },\n );\n }\n\n return new DefaultGenerateTextResult({\n steps,\n totalUsage,\n output: resolvedOutput,\n });\n },\n });\n } catch (error) {\n throw wrapGatewayError(error);\n }\n}\n\nasync function executeTools<TOOLS extends ToolSet>({\n toolCalls,\n tools,\n tracer,\n telemetry,\n messages,\n abortSignal,\n experimental_context,\n}: {\n toolCalls: Array<TypedToolCall<TOOLS>>;\n tools: TOOLS;\n tracer: Tracer;\n telemetry: TelemetrySettings | undefined;\n messages: ModelMessage[];\n abortSignal: AbortSignal | undefined;\n experimental_context: unknown;\n}): Promise<Array<ToolOutput<TOOLS>>> {\n const toolOutputs = await Promise.all(\n toolCalls.map(async toolCall =>\n executeToolCall({\n toolCall,\n tools,\n tracer,\n telemetry,\n messages,\n abortSignal,\n experimental_context,\n }),\n ),\n );\n\n return toolOutputs.filter(\n (output): output is NonNullable<typeof output> => output != null,\n );\n}\n\nclass DefaultGenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output>\n implements GenerateTextResult<TOOLS, OUTPUT>\n{\n readonly steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n readonly totalUsage: LanguageModelUsage;\n private readonly _output: InferCompleteOutput<OUTPUT> | undefined;\n\n constructor(options: {\n steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n output: InferCompleteOutput<OUTPUT> | undefined;\n totalUsage: LanguageModelUsage;\n }) {\n this.steps = options.steps;\n this._output = options.output;\n this.totalUsage = options.totalUsage;\n }\n\n private get finalStep() {\n return this.steps[this.steps.length - 1];\n }\n\n get content() {\n return this.finalStep.content;\n }\n\n get text() {\n return 
this.finalStep.text;\n }\n\n get files() {\n return this.finalStep.files;\n }\n\n get reasoningText() {\n return this.finalStep.reasoningText;\n }\n\n get reasoning() {\n return this.finalStep.reasoning;\n }\n\n get toolCalls() {\n return this.finalStep.toolCalls;\n }\n\n get staticToolCalls() {\n return this.finalStep.staticToolCalls;\n }\n\n get dynamicToolCalls() {\n return this.finalStep.dynamicToolCalls;\n }\n\n get toolResults() {\n return this.finalStep.toolResults;\n }\n\n get staticToolResults() {\n return this.finalStep.staticToolResults;\n }\n\n get dynamicToolResults() {\n return this.finalStep.dynamicToolResults;\n }\n\n get sources() {\n return this.finalStep.sources;\n }\n\n get finishReason() {\n return this.finalStep.finishReason;\n }\n\n get rawFinishReason() {\n return this.finalStep.rawFinishReason;\n }\n\n get warnings() {\n return this.finalStep.warnings;\n }\n\n get providerMetadata() {\n return this.finalStep.providerMetadata;\n }\n\n get response() {\n return this.finalStep.response;\n }\n\n get request() {\n return this.finalStep.request;\n }\n\n get usage() {\n return this.finalStep.usage;\n }\n\n get experimental_output() {\n return this.output;\n }\n\n get output() {\n if (this._output == null) {\n throw new NoOutputGeneratedError();\n }\n\n return this._output;\n }\n}\n\nfunction asToolCalls(content: Array<LanguageModelV3Content>) {\n const parts = content.filter(\n (part): part is LanguageModelV3ToolCall => part.type === 'tool-call',\n );\n\n if (parts.length === 0) {\n return undefined;\n }\n\n return parts.map(toolCall => ({\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.input,\n }));\n}\n\nfunction asContent<TOOLS extends ToolSet>({\n content,\n toolCalls,\n toolOutputs,\n toolApprovalRequests,\n tools,\n}: {\n content: Array<LanguageModelV3Content>;\n toolCalls: Array<TypedToolCall<TOOLS>>;\n toolOutputs: Array<ToolOutput<TOOLS>>;\n toolApprovalRequests: Array<ToolApprovalRequestOutput<TOOLS>>;\n tools: TOOLS | undefined;\n}): Array<ContentPart<TOOLS>> {\n const contentParts: Array<ContentPart<TOOLS>> = [];\n\n for (const part of content) {\n switch (part.type) {\n case 'text':\n case 'reasoning':\n case 'source':\n contentParts.push(part);\n break;\n\n case 'file': {\n contentParts.push({\n type: 'file' as const,\n file: new DefaultGeneratedFile(part),\n ...(part.providerMetadata != null\n ? { providerMetadata: part.providerMetadata }\n : {}),\n });\n break;\n }\n\n case 'tool-call': {\n contentParts.push(\n toolCalls.find(toolCall => toolCall.toolCallId === part.toolCallId)!,\n );\n break;\n }\n\n case 'tool-result': {\n const toolCall = toolCalls.find(\n toolCall => toolCall.toolCallId === part.toolCallId,\n );\n\n // Handle deferred results for provider-executed tools (e.g., programmatic tool calling).\n // When a server tool (like code_execution) triggers a client tool, the server tool's\n // result may be deferred to a later turn. 
In this case, there's no matching tool-call\n // in the current response.\n if (toolCall == null) {\n const tool = tools?.[part.toolName];\n const supportsDeferredResults =\n tool?.type === 'provider' && tool.supportsDeferredResults;\n\n if (!supportsDeferredResults) {\n throw new Error(`Tool call ${part.toolCallId} not found.`);\n }\n\n // Create tool result without tool call input (deferred result)\n if (part.isError) {\n contentParts.push({\n type: 'tool-error' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: undefined,\n error: part.result,\n providerExecuted: true,\n dynamic: part.dynamic,\n } as TypedToolError<TOOLS>);\n } else {\n contentParts.push({\n type: 'tool-result' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: undefined,\n output: part.result,\n providerExecuted: true,\n dynamic: part.dynamic,\n } as TypedToolResult<TOOLS>);\n }\n break;\n }\n\n if (part.isError) {\n contentParts.push({\n type: 'tool-error' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: toolCall.input,\n error: part.result,\n providerExecuted: true,\n dynamic: toolCall.dynamic,\n } as TypedToolError<TOOLS>);\n } else {\n contentParts.push({\n type: 'tool-result' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: toolCall.input,\n output: part.result,\n providerExecuted: true,\n dynamic: toolCall.dynamic,\n } as TypedToolResult<TOOLS>);\n }\n break;\n }\n\n case 'tool-approval-request': {\n const toolCall = toolCalls.find(\n toolCall => toolCall.toolCallId === part.toolCallId,\n );\n\n if (toolCall == null) {\n throw new ToolCallNotFoundForApprovalError({\n toolCallId: part.toolCallId,\n approvalId: part.approvalId,\n });\n }\n\n contentParts.push({\n type: 'tool-approval-request' as const,\n approvalId: part.approvalId,\n toolCall,\n });\n break;\n }\n }\n }\n\n return [...contentParts, ...toolOutputs, ...toolApprovalRequests];\n}\n","export {\n AISDKError,\n APICallError,\n EmptyResponseBodyError,\n InvalidPromptError,\n InvalidResponseDataError,\n JSONParseError,\n LoadAPIKeyError,\n LoadSettingError,\n NoContentGeneratedError,\n NoSuchModelError,\n TooManyEmbeddingValuesForCallError,\n TypeValidationError,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\n\nexport { InvalidArgumentError } from './invalid-argument-error';\nexport { InvalidStreamPartError } from './invalid-stream-part-error';\nexport { InvalidToolApprovalError } from './invalid-tool-approval-error';\nexport { InvalidToolInputError } from './invalid-tool-input-error';\nexport { ToolCallNotFoundForApprovalError } from './tool-call-not-found-for-approval-error';\nexport { NoImageGeneratedError } from './no-image-generated-error';\nexport { NoObjectGeneratedError } from './no-object-generated-error';\nexport { NoOutputGeneratedError } from './no-output-generated-error';\nexport { NoSpeechGeneratedError } from './no-speech-generated-error';\nexport { NoSuchToolError } from './no-such-tool-error';\nexport { ToolCallRepairError } from './tool-call-repair-error';\nexport { UnsupportedModelVersionError } from './unsupported-model-version-error';\n\nexport { InvalidDataContentError } from '../prompt/invalid-data-content-error';\nexport { InvalidMessageRoleError } fr