ai
AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript
1 line • 949 kB
Source Map (JSON)
{"version":3,"sources":["../src/index.ts","../src/generate-text/generate-text.ts","../src/error/index.ts","../src/error/invalid-argument-error.ts","../src/error/invalid-stream-part-error.ts","../src/error/invalid-tool-approval-error.ts","../src/error/invalid-tool-input-error.ts","../src/error/tool-call-not-found-for-approval-error.ts","../src/error/missing-tool-result-error.ts","../src/error/no-image-generated-error.ts","../src/error/no-object-generated-error.ts","../src/error/no-output-generated-error.ts","../src/error/no-speech-generated-error.ts","../src/error/no-transcript-generated-error.ts","../src/error/no-video-generated-error.ts","../src/error/no-such-tool-error.ts","../src/error/tool-call-repair-error.ts","../src/error/unsupported-model-version-error.ts","../src/error/ui-message-stream-error.ts","../src/prompt/invalid-data-content-error.ts","../src/prompt/invalid-message-role-error.ts","../src/prompt/message-conversion-error.ts","../src/util/retry-error.ts","../src/logger/log-warnings.ts","../src/model/resolve-model.ts","../src/util/log-v2-compatibility-warning.ts","../src/model/as-embedding-model-v3.ts","../src/model/as-image-model-v3.ts","../src/model/as-language-model-v3.ts","../src/model/as-speech-model-v3.ts","../src/model/as-transcription-model-v3.ts","../src/prompt/call-settings.ts","../src/prompt/convert-to-language-model-prompt.ts","../src/util/detect-media-type.ts","../src/util/download/download.ts","../src/version.ts","../src/util/download/download-function.ts","../src/prompt/data-content.ts","../src/prompt/split-data-url.ts","../src/util/as-array.ts","../src/prompt/create-tool-model-output.ts","../src/prompt/prepare-call-settings.ts","../src/prompt/prepare-tools-and-tool-choice.ts","../src/util/is-non-empty-object.ts","../src/prompt/standardize-prompt.ts","../src/prompt/message.ts","../src/types/provider-metadata.ts","../src/types/json-value.ts","../src/prompt/content-part.ts","../src/prompt/wrap-gateway-error.ts","../src/telemetry/assemble-operation-name.ts","../src/telemetry/get-base-telemetry-attributes.ts","../src/telemetry/get-tracer.ts","../src/telemetry/noop-tracer.ts","../src/telemetry/record-span.ts","../src/telemetry/select-telemetry-attributes.ts","../src/telemetry/stringify-for-telemetry.ts","../src/types/usage.ts","../src/util/merge-objects.ts","../src/util/retry-with-exponential-backoff.ts","../src/util/prepare-retries.ts","../src/generate-text/collect-tool-approvals.ts","../src/generate-text/execute-tool-call.ts","../src/generate-text/extract-reasoning-content.ts","../src/generate-text/extract-text-content.ts","../src/generate-text/generated-file.ts","../src/generate-text/is-approval-needed.ts","../src/generate-text/output.ts","../src/util/parse-partial-json.ts","../src/util/fix-json.ts","../src/generate-text/parse-tool-call.ts","../src/generate-text/step-result.ts","../src/generate-text/stop-condition.ts","../src/generate-text/to-response-messages.ts","../src/util/merge-abort-signals.ts","../src/generate-text/stream-text.ts","../src/util/prepare-headers.ts","../src/text-stream/create-text-stream-response.ts","../src/util/write-to-server-response.ts","../src/text-stream/pipe-text-stream-to-response.ts","../src/ui-message-stream/json-to-sse-transform-stream.ts","../src/ui-message-stream/ui-message-stream-headers.ts","../src/ui-message-stream/create-ui-message-stream-response.ts","../src/ui-message-stream/get-response-ui-message-id.ts","../src/ui/process-ui-message-stream.ts","../src/ui-message-stream/ui-message-chunks.ts","../src/ui/ui-messages.ts","../sr
c/ui-message-stream/handle-ui-message-stream-finish.ts","../src/ui-message-stream/pipe-ui-message-stream-to-response.ts","../src/util/async-iterable-stream.ts","../src/util/consume-stream.ts","../src/util/create-resolvable-promise.ts","../src/util/create-stitchable-stream.ts","../src/util/now.ts","../src/generate-text/run-tools-transformation.ts","../src/agent/tool-loop-agent.ts","../src/ui-message-stream/create-ui-message-stream.ts","../src/ui-message-stream/read-ui-message-stream.ts","../src/ui/convert-to-model-messages.ts","../src/ui/validate-ui-messages.ts","../src/agent/create-agent-ui-stream.ts","../src/agent/create-agent-ui-stream-response.ts","../src/agent/pipe-agent-ui-stream-to-response.ts","../src/embed/embed.ts","../src/embed/embed-many.ts","../src/util/split-array.ts","../src/generate-image/generate-image.ts","../src/generate-image/index.ts","../src/generate-object/generate-object.ts","../src/generate-object/output-strategy.ts","../src/generate-object/parse-and-validate-object-result.ts","../src/generate-object/validate-object-generation-input.ts","../src/generate-object/stream-object.ts","../src/util/cosine-similarity.ts","../src/util/data-url.ts","../src/util/is-deep-equal-data.ts","../src/util/serial-job-executor.ts","../src/util/simulate-readable-stream.ts","../src/generate-speech/generate-speech.ts","../src/generate-speech/generated-audio-file.ts","../src/generate-text/prune-messages.ts","../src/generate-text/smooth-stream.ts","../src/generate-video/generate-video.ts","../src/middleware/default-embedding-settings-middleware.ts","../src/middleware/default-settings-middleware.ts","../src/middleware/extract-json-middleware.ts","../src/util/get-potential-start-index.ts","../src/middleware/extract-reasoning-middleware.ts","../src/middleware/simulate-streaming-middleware.ts","../src/middleware/add-tool-input-examples-middleware.ts","../src/middleware/wrap-language-model.ts","../src/middleware/wrap-embedding-model.ts","../src/middleware/wrap-image-model.ts","../src/model/as-provider-v3.ts","../src/middleware/wrap-provider.ts","../src/registry/custom-provider.ts","../src/registry/no-such-provider-error.ts","../src/registry/provider-registry.ts","../src/rerank/rerank.ts","../src/transcribe/transcribe.ts","../src/ui/call-completion-api.ts","../src/ui/process-text-stream.ts","../src/ui/chat.ts","../src/ui/convert-file-list-to-file-ui-parts.ts","../src/ui/default-chat-transport.ts","../src/ui/http-chat-transport.ts","../src/ui/direct-chat-transport.ts","../src/ui/last-assistant-message-is-complete-with-approval-responses.ts","../src/ui/last-assistant-message-is-complete-with-tool-calls.ts","../src/ui/transform-text-to-ui-message-stream.ts","../src/ui/text-stream-chat-transport.ts"],"sourcesContent":["// re-exports:\nexport { createGateway, gateway, type GatewayModelId } from '@ai-sdk/gateway';\nexport {\n asSchema,\n createIdGenerator,\n dynamicTool,\n generateId,\n jsonSchema,\n parseJsonEventStream,\n tool,\n zodSchema,\n type FlexibleSchema,\n type IdGenerator,\n type InferSchema,\n type InferToolInput,\n type InferToolOutput,\n type Schema,\n type Tool,\n type ToolApprovalRequest,\n type ToolApprovalResponse,\n type ToolCallOptions,\n type ToolExecutionOptions,\n type ToolExecuteFunction,\n} from '@ai-sdk/provider-utils';\n\n// directory exports\nexport * from './agent';\nexport * from './embed';\nexport * from './error';\nexport * from './generate-image';\nexport * from './generate-object';\nexport * from './generate-speech';\nexport * from './generate-text';\nexport * from 
'./generate-video';\nexport * from './logger';\nexport * from './middleware';\nexport * from './prompt';\nexport * from './registry';\nexport * from './rerank';\nexport * from './text-stream';\nexport * from './transcribe';\nexport * from './types';\nexport * from './ui';\nexport * from './ui-message-stream';\nexport * from './util';\n\n// telemetry types:\nexport type { TelemetrySettings } from './telemetry/telemetry-settings';\n\n// import globals\nimport './global';\n","import {\n LanguageModelV3,\n LanguageModelV3Content,\n LanguageModelV3ToolCall,\n} from '@ai-sdk/provider';\nimport {\n createIdGenerator,\n getErrorMessage,\n IdGenerator,\n ProviderOptions,\n ToolApprovalResponse,\n withUserAgentSuffix,\n} from '@ai-sdk/provider-utils';\nimport { Tracer } from '@opentelemetry/api';\nimport { NoOutputGeneratedError } from '../error';\nimport { logWarnings } from '../logger/log-warnings';\nimport { resolveLanguageModel } from '../model/resolve-model';\nimport { ModelMessage } from '../prompt';\nimport {\n CallSettings,\n getStepTimeoutMs,\n getTotalTimeoutMs,\n} from '../prompt/call-settings';\nimport { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';\nimport { createToolModelOutput } from '../prompt/create-tool-model-output';\nimport { prepareCallSettings } from '../prompt/prepare-call-settings';\nimport { prepareToolsAndToolChoice } from '../prompt/prepare-tools-and-tool-choice';\nimport { Prompt } from '../prompt/prompt';\nimport { standardizePrompt } from '../prompt/standardize-prompt';\nimport { wrapGatewayError } from '../prompt/wrap-gateway-error';\nimport { ToolCallNotFoundForApprovalError } from '../error/tool-call-not-found-for-approval-error';\nimport { assembleOperationName } from '../telemetry/assemble-operation-name';\nimport { getBaseTelemetryAttributes } from '../telemetry/get-base-telemetry-attributes';\nimport { getTracer } from '../telemetry/get-tracer';\nimport { recordSpan } from '../telemetry/record-span';\nimport { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes';\nimport { stringifyForTelemetry } from '../telemetry/stringify-for-telemetry';\nimport { TelemetrySettings } from '../telemetry/telemetry-settings';\nimport {\n LanguageModel,\n LanguageModelRequestMetadata,\n ToolChoice,\n} from '../types';\nimport {\n addLanguageModelUsage,\n asLanguageModelUsage,\n LanguageModelUsage,\n} from '../types/usage';\nimport { asArray } from '../util/as-array';\nimport { DownloadFunction } from '../util/download/download-function';\nimport { mergeObjects } from '../util/merge-objects';\nimport { prepareRetries } from '../util/prepare-retries';\nimport { VERSION } from '../version';\nimport { collectToolApprovals } from './collect-tool-approvals';\nimport { ContentPart } from './content-part';\nimport { executeToolCall } from './execute-tool-call';\nimport { extractReasoningContent } from './extract-reasoning-content';\nimport { extractTextContent } from './extract-text-content';\nimport { GenerateTextResult } from './generate-text-result';\nimport { DefaultGeneratedFile } from './generated-file';\nimport { isApprovalNeeded } from './is-approval-needed';\nimport { Output, text } from './output';\nimport { InferCompleteOutput } from './output-utils';\nimport { parseToolCall } from './parse-tool-call';\nimport { PrepareStepFunction } from './prepare-step';\nimport { ResponseMessage } from './response-message';\nimport { DefaultStepResult, StepResult } from './step-result';\nimport {\n isStopConditionMet,\n 
stepCountIs,\n StopCondition,\n} from './stop-condition';\nimport { toResponseMessages } from './to-response-messages';\nimport { ToolApprovalRequestOutput } from './tool-approval-request-output';\nimport { TypedToolCall } from './tool-call';\nimport { ToolCallRepairFunction } from './tool-call-repair-function';\nimport { TypedToolError } from './tool-error';\nimport { ToolOutput } from './tool-output';\nimport { TypedToolResult } from './tool-result';\nimport { ToolSet } from './tool-set';\nimport { mergeAbortSignals } from '../util/merge-abort-signals';\n\nconst originalGenerateId = createIdGenerator({\n prefix: 'aitxt',\n size: 24,\n});\n\n/**\n * Callback that is set using the `onStepFinish` option.\n *\n * @param stepResult - The result of the step.\n */\nexport type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (\n stepResult: StepResult<TOOLS>,\n) => Promise<void> | void;\n\n/**\n * Callback that is set using the `onFinish` option.\n *\n * @param event - The event that is passed to the callback.\n */\nexport type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (\n event: StepResult<TOOLS> & {\n /**\n * Details for all steps.\n */\n readonly steps: StepResult<TOOLS>[];\n\n /**\n * Total usage for all steps. This is the sum of the usage of all steps.\n */\n readonly totalUsage: LanguageModelUsage;\n\n /**\n * Context that is passed into tool execution.\n *\n * Experimental (can break in patch releases).\n *\n * @default undefined\n */\n experimental_context: unknown;\n },\n) => PromiseLike<void> | void;\n\n/**\n * Generate a text and call tools for a given prompt using a language model.\n *\n * This function does not stream the output. If you want to stream the output, use `streamText` instead.\n *\n * @param model - The language model to use.\n *\n * @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.\n * @param toolChoice - The tool choice strategy. Default: 'auto'.\n *\n * @param system - A system message that will be part of the prompt.\n * @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.\n * @param messages - A list of messages. You can either use `prompt` or `messages` but not both.\n *\n * @param maxOutputTokens - Maximum number of tokens to generate.\n * @param temperature - Temperature setting.\n * The value is passed through to the provider. The range depends on the provider and model.\n * It is recommended to set either `temperature` or `topP`, but not both.\n * @param topP - Nucleus sampling.\n * The value is passed through to the provider. The range depends on the provider and model.\n * It is recommended to set either `temperature` or `topP`, but not both.\n * @param topK - Only sample from the top K options for each subsequent token.\n * Used to remove \"long tail\" low probability responses.\n * Recommended for advanced use cases only. You usually only need to use temperature.\n * @param presencePenalty - Presence penalty setting.\n * It affects the likelihood of the model to repeat information that is already in the prompt.\n * The value is passed through to the provider. The range depends on the provider and model.\n * @param frequencyPenalty - Frequency penalty setting.\n * It affects the likelihood of the model to repeatedly use the same words or phrases.\n * The value is passed through to the provider. 
The range depends on the provider and model.\n * @param stopSequences - Stop sequences.\n * If set, the model will stop generating text when one of the stop sequences is generated.\n * @param seed - The seed (integer) to use for random sampling.\n * If set and supported by the model, calls will generate deterministic results.\n *\n * @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.\n * @param abortSignal - An optional abort signal that can be used to cancel the call.\n * @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.\n * @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.\n *\n * @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.\n * @param onFinish - Callback that is called when all steps are finished and the response is complete.\n *\n * @returns\n * A result object that contains the generated text, the results of the tool calls, and additional information.\n */\nexport async function generateText<\n TOOLS extends ToolSet,\n OUTPUT extends Output = Output<string, string>,\n>({\n model: modelArg,\n tools,\n toolChoice,\n system,\n prompt,\n messages,\n maxRetries: maxRetriesArg,\n abortSignal,\n timeout,\n headers,\n stopWhen = stepCountIs(1),\n experimental_output,\n output = experimental_output,\n experimental_telemetry: telemetry,\n providerOptions,\n experimental_activeTools,\n activeTools = experimental_activeTools,\n experimental_prepareStep,\n prepareStep = experimental_prepareStep,\n experimental_repairToolCall: repairToolCall,\n experimental_download: download,\n experimental_context,\n experimental_include: include,\n _internal: { generateId = originalGenerateId } = {},\n onStepFinish,\n onFinish,\n ...settings\n}: CallSettings &\n Prompt & {\n /**\n * The language model to use.\n */\n model: LanguageModel;\n\n /**\n * The tools that the model can call. The model needs to support calling tools.\n */\n tools?: TOOLS;\n\n /**\n * The tool choice strategy. Default: 'auto'.\n */\n toolChoice?: ToolChoice<NoInfer<TOOLS>>;\n\n /**\n * Condition for stopping the generation when there are tool results in the last step.\n * When the condition is an array, any of the conditions can be met to stop the generation.\n *\n * @default stepCountIs(1)\n */\n stopWhen?:\n | StopCondition<NoInfer<TOOLS>>\n | Array<StopCondition<NoInfer<TOOLS>>>;\n\n /**\n * Optional telemetry configuration (experimental).\n */\n experimental_telemetry?: TelemetrySettings;\n\n /**\n * Additional provider-specific options. 
They are passed through\n * to the provider from the AI SDK and enable provider-specific\n * functionality that can be fully encapsulated in the provider.\n */\n providerOptions?: ProviderOptions;\n\n /**\n * @deprecated Use `activeTools` instead.\n */\n experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\n * Limits the tools that are available for the model to call without\n * changing the tool call and result types in the result.\n */\n activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\n * Optional specification for parsing structured outputs from the LLM response.\n */\n output?: OUTPUT;\n\n /**\n * Optional specification for parsing structured outputs from the LLM response.\n *\n * @deprecated Use `output` instead.\n */\n experimental_output?: OUTPUT;\n\n /**\n * Custom download function to use for URLs.\n *\n * By default, files are downloaded if the model does not support the URL for the given media type.\n */\n experimental_download?: DownloadFunction | undefined;\n\n /**\n * @deprecated Use `prepareStep` instead.\n */\n experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\n * Optional function that you can use to provide different settings for a step.\n */\n prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\n * A function that attempts to repair a tool call that failed to parse.\n */\n experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;\n\n /**\n * Callback that is called when each step (LLM call) is finished, including intermediate steps.\n */\n onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;\n\n /**\n * Callback that is called when all steps are finished and the response is complete.\n */\n onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;\n\n /**\n * Context that is passed into tool execution.\n *\n * Experimental (can break in patch releases).\n *\n * @default undefined\n */\n experimental_context?: unknown;\n\n /**\n * Settings for controlling what data is included in step results.\n * Disabling inclusion can help reduce memory usage when processing\n * large payloads like images.\n *\n * By default, all data is included for backwards compatibility.\n */\n experimental_include?: {\n /**\n * Whether to retain the request body in step results.\n * The request body can be large when sending images or files.\n * @default true\n */\n requestBody?: boolean;\n\n /**\n * Whether to retain the response body in step results.\n * @default true\n */\n responseBody?: boolean;\n };\n\n /**\n * Internal. For test use only. May change without notice.\n */\n _internal?: {\n generateId?: IdGenerator;\n };\n }): Promise<GenerateTextResult<TOOLS, OUTPUT>> {\n const model = resolveLanguageModel(modelArg);\n const stopConditions = asArray(stopWhen);\n\n const totalTimeoutMs = getTotalTimeoutMs(timeout);\n const stepTimeoutMs = getStepTimeoutMs(timeout);\n const stepAbortController =\n stepTimeoutMs != null ? new AbortController() : undefined;\n const mergedAbortSignal = mergeAbortSignals(\n abortSignal,\n totalTimeoutMs != null ? AbortSignal.timeout(totalTimeoutMs) : undefined,\n stepAbortController?.signal,\n );\n\n const { maxRetries, retry } = prepareRetries({\n maxRetries: maxRetriesArg,\n abortSignal: mergedAbortSignal,\n });\n\n const callSettings = prepareCallSettings(settings);\n\n const headersWithUserAgent = withUserAgentSuffix(\n headers ?? 
{},\n `ai/${VERSION}`,\n );\n\n const baseTelemetryAttributes = getBaseTelemetryAttributes({\n model,\n telemetry,\n headers: headersWithUserAgent,\n settings: { ...callSettings, maxRetries },\n });\n\n const initialPrompt = await standardizePrompt({\n system,\n prompt,\n messages,\n } as Prompt);\n\n const tracer = getTracer(telemetry);\n\n try {\n return await recordSpan({\n name: 'ai.generateText',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': model.provider,\n 'ai.model.id': model.modelId,\n // specific settings that only make sense on the outer level:\n 'ai.prompt': {\n input: () => JSON.stringify({ system, prompt, messages }),\n },\n },\n }),\n tracer,\n fn: async span => {\n const initialMessages = initialPrompt.messages;\n const responseMessages: Array<ResponseMessage> = [];\n\n const { approvedToolApprovals, deniedToolApprovals } =\n collectToolApprovals<TOOLS>({ messages: initialMessages });\n\n const localApprovedToolApprovals = approvedToolApprovals.filter(\n toolApproval => !toolApproval.toolCall.providerExecuted,\n );\n\n if (\n deniedToolApprovals.length > 0 ||\n localApprovedToolApprovals.length > 0\n ) {\n const toolOutputs = await executeTools({\n toolCalls: localApprovedToolApprovals.map(\n toolApproval => toolApproval.toolCall,\n ),\n tools: tools as TOOLS,\n tracer,\n telemetry,\n messages: initialMessages,\n abortSignal: mergedAbortSignal,\n experimental_context,\n });\n\n const toolContent: Array<any> = [];\n\n // add regular tool results for approved tool calls:\n for (const output of toolOutputs) {\n const modelOutput = await createToolModelOutput({\n toolCallId: output.toolCallId,\n input: output.input,\n tool: tools?.[output.toolName],\n output:\n output.type === 'tool-result' ? output.output : output.error,\n errorMode: output.type === 'tool-error' ? 
'json' : 'none',\n });\n\n toolContent.push({\n type: 'tool-result' as const,\n toolCallId: output.toolCallId,\n toolName: output.toolName,\n output: modelOutput,\n });\n }\n\n // add execution denied tool results for all denied tool approvals:\n for (const toolApproval of deniedToolApprovals) {\n toolContent.push({\n type: 'tool-result' as const,\n toolCallId: toolApproval.toolCall.toolCallId,\n toolName: toolApproval.toolCall.toolName,\n output: {\n type: 'execution-denied' as const,\n reason: toolApproval.approvalResponse.reason,\n // For provider-executed tools, include approvalId so provider can correlate\n ...(toolApproval.toolCall.providerExecuted && {\n providerOptions: {\n openai: {\n approvalId: toolApproval.approvalResponse.approvalId,\n },\n },\n }),\n },\n });\n }\n\n responseMessages.push({\n role: 'tool',\n content: toolContent,\n });\n }\n\n // Forward provider-executed approval responses to the provider\n const providerExecutedToolApprovals = [\n ...approvedToolApprovals,\n ...deniedToolApprovals,\n ].filter(toolApproval => toolApproval.toolCall.providerExecuted);\n\n if (providerExecutedToolApprovals.length > 0) {\n responseMessages.push({\n role: 'tool',\n content: providerExecutedToolApprovals.map(\n toolApproval =>\n ({\n type: 'tool-approval-response',\n approvalId: toolApproval.approvalResponse.approvalId,\n approved: toolApproval.approvalResponse.approved,\n reason: toolApproval.approvalResponse.reason,\n providerExecuted: true,\n }) satisfies ToolApprovalResponse,\n ),\n });\n }\n\n const callSettings = prepareCallSettings(settings);\n\n let currentModelResponse: Awaited<\n ReturnType<LanguageModelV3['doGenerate']>\n > & { response: { id: string; timestamp: Date; modelId: string } };\n let clientToolCalls: Array<TypedToolCall<TOOLS>> = [];\n let clientToolOutputs: Array<ToolOutput<TOOLS>> = [];\n const steps: GenerateTextResult<TOOLS, OUTPUT>['steps'] = [];\n\n // Track provider-executed tool calls that support deferred results\n // (e.g., code_execution in programmatic tool calling scenarios).\n // These tools may not return their results in the same turn as their call.\n const pendingDeferredToolCalls = new Map<\n string,\n { toolName: string }\n >();\n\n do {\n // Set up step timeout if configured\n const stepTimeoutId =\n stepTimeoutMs != null\n ? setTimeout(() => stepAbortController!.abort(), stepTimeoutMs)\n : undefined;\n\n try {\n const stepInputMessages = [...initialMessages, ...responseMessages];\n\n const prepareStepResult = await prepareStep?.({\n model,\n steps,\n stepNumber: steps.length,\n messages: stepInputMessages,\n experimental_context,\n });\n\n const stepModel = resolveLanguageModel(\n prepareStepResult?.model ?? model,\n );\n\n const promptMessages = await convertToLanguageModelPrompt({\n prompt: {\n system: prepareStepResult?.system ?? initialPrompt.system,\n messages: prepareStepResult?.messages ?? stepInputMessages,\n },\n supportedUrls: await stepModel.supportedUrls,\n download,\n });\n\n experimental_context =\n prepareStepResult?.experimental_context ?? experimental_context;\n\n const { toolChoice: stepToolChoice, tools: stepTools } =\n await prepareToolsAndToolChoice({\n tools,\n toolChoice: prepareStepResult?.toolChoice ?? toolChoice,\n activeTools: prepareStepResult?.activeTools ?? 
activeTools,\n });\n\n currentModelResponse = await retry(() =>\n recordSpan({\n name: 'ai.generateText.doGenerate',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText.doGenerate',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': stepModel.provider,\n 'ai.model.id': stepModel.modelId,\n // prompt:\n 'ai.prompt.messages': {\n input: () => stringifyForTelemetry(promptMessages),\n },\n 'ai.prompt.tools': {\n // convert the language model level tools:\n input: () => stepTools?.map(tool => JSON.stringify(tool)),\n },\n 'ai.prompt.toolChoice': {\n input: () =>\n stepToolChoice != null\n ? JSON.stringify(stepToolChoice)\n : undefined,\n },\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.system': stepModel.provider,\n 'gen_ai.request.model': stepModel.modelId,\n 'gen_ai.request.frequency_penalty':\n settings.frequencyPenalty,\n 'gen_ai.request.max_tokens': settings.maxOutputTokens,\n 'gen_ai.request.presence_penalty': settings.presencePenalty,\n 'gen_ai.request.stop_sequences': settings.stopSequences,\n 'gen_ai.request.temperature':\n settings.temperature ?? undefined,\n 'gen_ai.request.top_k': settings.topK,\n 'gen_ai.request.top_p': settings.topP,\n },\n }),\n tracer,\n fn: async span => {\n const stepProviderOptions = mergeObjects(\n providerOptions,\n prepareStepResult?.providerOptions,\n );\n\n const result = await stepModel.doGenerate({\n ...callSettings,\n tools: stepTools,\n toolChoice: stepToolChoice,\n responseFormat: await output?.responseFormat,\n prompt: promptMessages,\n providerOptions: stepProviderOptions,\n abortSignal: mergedAbortSignal,\n headers: headersWithUserAgent,\n });\n\n // Fill in default values:\n const responseData = {\n id: result.response?.id ?? generateId(),\n timestamp: result.response?.timestamp ?? new Date(),\n modelId: result.response?.modelId ?? stepModel.modelId,\n headers: result.response?.headers,\n body: result.response?.body,\n };\n\n // Add response information to the span:\n span.setAttributes(\n await selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason': result.finishReason.unified,\n 'ai.response.text': {\n output: () => extractTextContent(result.content),\n },\n 'ai.response.reasoning': {\n output: () => extractReasoningContent(result.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(result.content);\n return toolCalls == null\n ? 
undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.id': responseData.id,\n 'ai.response.model': responseData.modelId,\n 'ai.response.timestamp':\n responseData.timestamp.toISOString(),\n 'ai.response.providerMetadata': JSON.stringify(\n result.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens': result.usage.inputTokens.total,\n 'ai.usage.completionTokens':\n result.usage.outputTokens.total,\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.response.finish_reasons': [\n result.finishReason.unified,\n ],\n 'gen_ai.response.id': responseData.id,\n 'gen_ai.response.model': responseData.modelId,\n 'gen_ai.usage.input_tokens':\n result.usage.inputTokens.total,\n 'gen_ai.usage.output_tokens':\n result.usage.outputTokens.total,\n },\n }),\n );\n\n return { ...result, response: responseData };\n },\n }),\n );\n\n // parse tool calls:\n const stepToolCalls: TypedToolCall<TOOLS>[] = await Promise.all(\n currentModelResponse.content\n .filter(\n (part): part is LanguageModelV3ToolCall =>\n part.type === 'tool-call',\n )\n .map(toolCall =>\n parseToolCall({\n toolCall,\n tools,\n repairToolCall,\n system,\n messages: stepInputMessages,\n }),\n ),\n );\n const toolApprovalRequests: Record<\n string,\n ToolApprovalRequestOutput<TOOLS>\n > = {};\n\n // notify the tools that the tool calls are available:\n for (const toolCall of stepToolCalls) {\n if (toolCall.invalid) {\n continue; // ignore invalid tool calls\n }\n\n const tool = tools?.[toolCall.toolName];\n\n if (tool == null) {\n // ignore tool calls for tools that are not available,\n // e.g. provider-executed dynamic tools\n continue;\n }\n\n if (tool?.onInputAvailable != null) {\n await tool.onInputAvailable({\n input: toolCall.input,\n toolCallId: toolCall.toolCallId,\n messages: stepInputMessages,\n abortSignal: mergedAbortSignal,\n experimental_context,\n });\n }\n\n if (\n await isApprovalNeeded({\n tool,\n toolCall,\n messages: stepInputMessages,\n experimental_context,\n })\n ) {\n toolApprovalRequests[toolCall.toolCallId] = {\n type: 'tool-approval-request',\n approvalId: generateId(),\n toolCall,\n };\n }\n }\n\n // insert error tool outputs for invalid tool calls:\n // TODO AI SDK 6: invalid inputs should not require output parts\n const invalidToolCalls = stepToolCalls.filter(\n toolCall => toolCall.invalid && toolCall.dynamic,\n );\n\n clientToolOutputs = [];\n\n for (const toolCall of invalidToolCalls) {\n clientToolOutputs.push({\n type: 'tool-error',\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.input,\n error: getErrorMessage(toolCall.error!),\n dynamic: true,\n });\n }\n\n // execute client tool calls:\n clientToolCalls = stepToolCalls.filter(\n toolCall => !toolCall.providerExecuted,\n );\n\n if (tools != null) {\n clientToolOutputs.push(\n ...(await executeTools({\n toolCalls: clientToolCalls.filter(\n toolCall =>\n !toolCall.invalid &&\n toolApprovalRequests[toolCall.toolCallId] == null,\n ),\n tools,\n tracer,\n telemetry,\n messages: stepInputMessages,\n abortSignal: mergedAbortSignal,\n experimental_context,\n })),\n );\n }\n\n // Track provider-executed tool calls that support deferred results.\n // In programmatic tool calling, a server tool (e.g., code_execution) may\n // trigger a client tool, and the server tool's result is deferred until\n // the client tool's result is sent back.\n for (const toolCall of stepToolCalls) {\n if (!toolCall.providerExecuted) continue;\n const tool = 
tools?.[toolCall.toolName];\n if (tool?.type === 'provider' && tool.supportsDeferredResults) {\n // Check if this tool call already has a result in the current response\n const hasResultInResponse = currentModelResponse.content.some(\n part =>\n part.type === 'tool-result' &&\n part.toolCallId === toolCall.toolCallId,\n );\n if (!hasResultInResponse) {\n pendingDeferredToolCalls.set(toolCall.toolCallId, {\n toolName: toolCall.toolName,\n });\n }\n }\n }\n\n // Mark deferred tool calls as resolved when we receive their results\n for (const part of currentModelResponse.content) {\n if (part.type === 'tool-result') {\n pendingDeferredToolCalls.delete(part.toolCallId);\n }\n }\n\n // content:\n const stepContent = asContent({\n content: currentModelResponse.content,\n toolCalls: stepToolCalls,\n toolOutputs: clientToolOutputs,\n toolApprovalRequests: Object.values(toolApprovalRequests),\n tools,\n });\n\n // append to messages for potential next step:\n responseMessages.push(\n ...(await toResponseMessages({\n content: stepContent,\n tools,\n })),\n );\n\n // Add step information (after response messages are updated):\n // Conditionally include request.body and response.body based on include settings.\n // Large payloads (e.g., base64-encoded images) can cause memory issues.\n const stepRequest: LanguageModelRequestMetadata =\n (include?.requestBody ?? true)\n ? (currentModelResponse.request ?? {})\n : { ...currentModelResponse.request, body: undefined };\n\n const stepResponse = {\n ...currentModelResponse.response,\n // deep clone msgs to avoid mutating past messages in multi-step:\n messages: structuredClone(responseMessages),\n // Conditionally include response body:\n body:\n (include?.responseBody ?? true)\n ? currentModelResponse.response?.body\n : undefined,\n };\n\n const currentStepResult: StepResult<TOOLS> = new DefaultStepResult({\n content: stepContent,\n finishReason: currentModelResponse.finishReason.unified,\n rawFinishReason: currentModelResponse.finishReason.raw,\n usage: asLanguageModelUsage(currentModelResponse.usage),\n warnings: currentModelResponse.warnings,\n providerMetadata: currentModelResponse.providerMetadata,\n request: stepRequest,\n response: stepResponse,\n });\n\n logWarnings({\n warnings: currentModelResponse.warnings ?? [],\n provider: stepModel.provider,\n model: stepModel.modelId,\n });\n\n steps.push(currentStepResult);\n await onStepFinish?.(currentStepResult);\n } finally {\n if (stepTimeoutId != null) {\n clearTimeout(stepTimeoutId);\n }\n }\n } while (\n // Continue if:\n // 1. There are client tool calls that have all been executed, OR\n // 2. There are pending deferred results from provider-executed tools\n ((clientToolCalls.length > 0 &&\n clientToolOutputs.length === clientToolCalls.length) ||\n pendingDeferredToolCalls.size > 0) &&\n // continue until a stop condition is met:\n !(await isStopConditionMet({ stopConditions, steps }))\n );\n\n // Add response information to the span:\n span.setAttributes(\n await selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason':\n currentModelResponse.finishReason.unified,\n 'ai.response.text': {\n output: () => extractTextContent(currentModelResponse.content),\n },\n 'ai.response.reasoning': {\n output: () =>\n extractReasoningContent(currentModelResponse.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(currentModelResponse.content);\n return toolCalls == null\n ? 
undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.providerMetadata': JSON.stringify(\n currentModelResponse.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens':\n currentModelResponse.usage.inputTokens.total,\n 'ai.usage.completionTokens':\n currentModelResponse.usage.outputTokens.total,\n },\n }),\n );\n\n const lastStep = steps[steps.length - 1];\n\n const totalUsage = steps.reduce(\n (totalUsage, step) => {\n return addLanguageModelUsage(totalUsage, step.usage);\n },\n {\n inputTokens: undefined,\n outputTokens: undefined,\n totalTokens: undefined,\n reasoningTokens: undefined,\n cachedInputTokens: undefined,\n } as LanguageModelUsage,\n );\n\n await onFinish?.({\n finishReason: lastStep.finishReason,\n rawFinishReason: lastStep.rawFinishReason,\n usage: lastStep.usage,\n content: lastStep.content,\n text: lastStep.text,\n reasoningText: lastStep.reasoningText,\n reasoning: lastStep.reasoning,\n files: lastStep.files,\n sources: lastStep.sources,\n toolCalls: lastStep.toolCalls,\n staticToolCalls: lastStep.staticToolCalls,\n dynamicToolCalls: lastStep.dynamicToolCalls,\n toolResults: lastStep.toolResults,\n staticToolResults: lastStep.staticToolResults,\n dynamicToolResults: lastStep.dynamicToolResults,\n request: lastStep.request,\n response: lastStep.response,\n warnings: lastStep.warnings,\n providerMetadata: lastStep.providerMetadata,\n steps,\n totalUsage,\n experimental_context,\n });\n\n // parse output only if the last step was finished with \"stop\":\n let resolvedOutput;\n if (lastStep.finishReason === 'stop') {\n const outputSpecification = output ?? text();\n resolvedOutput = await outputSpecification.parseCompleteOutput(\n { text: lastStep.text },\n {\n response: lastStep.response,\n usage: lastStep.usage,\n finishReason: lastStep.finishReason,\n },\n );\n }\n\n return new DefaultGenerateTextResult({\n steps,\n totalUsage,\n output: resolvedOutput,\n });\n },\n });\n } catch (error) {\n throw wrapGatewayError(error);\n }\n}\n\nasync function executeTools<TOOLS extends ToolSet>({\n toolCalls,\n tools,\n tracer,\n telemetry,\n messages,\n abortSignal,\n experimental_context,\n}: {\n toolCalls: Array<TypedToolCall<TOOLS>>;\n tools: TOOLS;\n tracer: Tracer;\n telemetry: TelemetrySettings | undefined;\n messages: ModelMessage[];\n abortSignal: AbortSignal | undefined;\n experimental_context: unknown;\n}): Promise<Array<ToolOutput<TOOLS>>> {\n const toolOutputs = await Promise.all(\n toolCalls.map(async toolCall =>\n executeToolCall({\n toolCall,\n tools,\n tracer,\n telemetry,\n messages,\n abortSignal,\n experimental_context,\n }),\n ),\n );\n\n return toolOutputs.filter(\n (output): output is NonNullable<typeof output> => output != null,\n );\n}\n\nclass DefaultGenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output>\n implements GenerateTextResult<TOOLS, OUTPUT>\n{\n readonly steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n readonly totalUsage: LanguageModelUsage;\n private readonly _output: InferCompleteOutput<OUTPUT> | undefined;\n\n constructor(options: {\n steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n output: InferCompleteOutput<OUTPUT> | undefined;\n totalUsage: LanguageModelUsage;\n }) {\n this.steps = options.steps;\n this._output = options.output;\n this.totalUsage = options.totalUsage;\n }\n\n private get finalStep() {\n return this.steps[this.steps.length - 1];\n }\n\n get content() {\n return this.finalStep.content;\n }\n\n get text() {\n return 
this.finalStep.text;\n }\n\n get files() {\n return this.finalStep.files;\n }\n\n get reasoningText() {\n return this.finalStep.reasoningText;\n }\n\n get reasoning() {\n return this.finalStep.reasoning;\n }\n\n get toolCalls() {\n return this.finalStep.toolCalls;\n }\n\n get staticToolCalls() {\n return this.finalStep.staticToolCalls;\n }\n\n get dynamicToolCalls() {\n return this.finalStep.dynamicToolCalls;\n }\n\n get toolResults() {\n return this.finalStep.toolResults;\n }\n\n get staticToolResults() {\n return this.finalStep.staticToolResults;\n }\n\n get dynamicToolResults() {\n return this.finalStep.dynamicToolResults;\n }\n\n get sources() {\n return this.finalStep.sources;\n }\n\n get finishReason() {\n return this.finalStep.finishReason;\n }\n\n get rawFinishReason() {\n return this.finalStep.rawFinishReason;\n }\n\n get warnings() {\n return this.finalStep.warnings;\n }\n\n get providerMetadata() {\n return this.finalStep.providerMetadata;\n }\n\n get response() {\n return this.finalStep.response;\n }\n\n get request() {\n return this.finalStep.request;\n }\n\n get usage() {\n return this.finalStep.usage;\n }\n\n get experimental_output() {\n return this.output;\n }\n\n get output() {\n if (this._output == null) {\n throw new NoOutputGeneratedError();\n }\n\n return this._output;\n }\n}\n\nfunction asToolCalls(content: Array<LanguageModelV3Content>) {\n const parts = content.filter(\n (part): part is LanguageModelV3ToolCall => part.type === 'tool-call',\n );\n\n if (parts.length === 0) {\n return undefined;\n }\n\n return parts.map(toolCall => ({\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.input,\n }));\n}\n\nfunction asContent<TOOLS extends ToolSet>({\n content,\n toolCalls,\n toolOutputs,\n toolApprovalRequests,\n tools,\n}: {\n content: Array<LanguageModelV3Content>;\n toolCalls: Array<TypedToolCall<TOOLS>>;\n toolOutputs: Array<ToolOutput<TOOLS>>;\n toolApprovalRequests: Array<ToolApprovalRequestOutput<TOOLS>>;\n tools: TOOLS | undefined;\n}): Array<ContentPart<TOOLS>> {\n const contentParts: Array<ContentPart<TOOLS>> = [];\n\n for (const part of content) {\n switch (part.type) {\n case 'text':\n case 'reasoning':\n case 'source':\n contentParts.push(part);\n break;\n\n case 'file': {\n contentParts.push({\n type: 'file' as const,\n file: new DefaultGeneratedFile(part),\n ...(part.providerMetadata != null\n ? { providerMetadata: part.providerMetadata }\n : {}),\n });\n break;\n }\n\n case 'tool-call': {\n contentParts.push(\n toolCalls.find(toolCall => toolCall.toolCallId === part.toolCallId)!,\n );\n break;\n }\n\n case 'tool-result': {\n const toolCall = toolCalls.find(\n toolCall => toolCall.toolCallId === part.toolCallId,\n );\n\n // Handle deferred results for provider-executed tools (e.g., programmatic tool calling).\n // When a server tool (like code_execution) triggers a client tool, the server tool's\n // result may be deferred to a later turn. In this case, there's no matching tool-call\n
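
The JSDoc embedded in the sourcesContent above documents the generateText API: a model, optional tools with a toolChoice strategy, a prompt or messages, a stopWhen condition for the multi-step tool loop (default stepCountIs(1)), and onStepFinish/onFinish callbacks. A minimal usage sketch based on that documentation follows. The gateway model id and the weather tool are illustrative stand-ins, and the stubbed execute result is not real data.

import { generateText, tool, stepCountIs } from 'ai';
import { z } from 'zod';

const result = await generateText({
  // Plain string model ids resolve through the Vercel AI Gateway
  // re-exported in the index above; this particular id is an
  // illustrative choice, not a recommendation.
  model: 'openai/gpt-4o',
  system: 'You are a helpful assistant.',
  prompt: 'What is the weather in Berlin?',
  tools: {
    // Hypothetical tool for illustration; `tool` is re-exported
    // from '@ai-sdk/provider-utils' per the index.ts above.
    weather: tool({
      description: 'Get the current weather for a city.',
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperatureC: 18 }), // stubbed result
    }),
  },
  maxRetries: 2, // the documented default
  // Allow up to three LLM calls so tool results can be fed back to
  // the model; the documented default, stepCountIs(1), stops after
  // a single step:
  stopWhen: stepCountIs(3),
  onStepFinish: step => {
    console.log('step finished:', step.finishReason);
  },
});

console.log(result.text); // text of the final step
console.log(result.totalUsage); // summed usage across all steps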