ai

AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript

1 line, 723 kB
{"version":3,"sources":["../src/index.ts","../src/generate-text/generate-text.ts","../src/error/no-output-specified-error.ts","../src/logger/log-warnings.ts","../src/model/resolve-model.ts","../src/error/index.ts","../src/error/invalid-argument-error.ts","../src/error/invalid-stream-part-error.ts","../src/error/invalid-tool-input-error.ts","../src/error/mcp-client-error.ts","../src/error/no-image-generated-error.ts","../src/error/no-object-generated-error.ts","../src/error/no-output-generated-error.ts","../src/error/no-such-tool-error.ts","../src/error/tool-call-repair-error.ts","../src/error/unsupported-model-version-error.ts","../src/prompt/invalid-data-content-error.ts","../src/prompt/invalid-message-role-error.ts","../src/prompt/message-conversion-error.ts","../src/util/download/download-error.ts","../src/util/retry-error.ts","../src/prompt/convert-to-language-model-prompt.ts","../src/util/detect-media-type.ts","../src/util/download/download.ts","../src/util/download/download-function.ts","../src/prompt/data-content.ts","../src/prompt/split-data-url.ts","../src/prompt/prepare-call-settings.ts","../src/prompt/prepare-tools-and-tool-choice.ts","../src/util/is-non-empty-object.ts","../src/prompt/standardize-prompt.ts","../src/prompt/message.ts","../src/types/provider-metadata.ts","../src/types/json-value.ts","../src/prompt/content-part.ts","../src/prompt/wrap-gateway-error.ts","../src/telemetry/assemble-operation-name.ts","../src/telemetry/get-base-telemetry-attributes.ts","../src/telemetry/get-tracer.ts","../src/telemetry/noop-tracer.ts","../src/telemetry/record-span.ts","../src/telemetry/select-telemetry-attributes.ts","../src/telemetry/stringify-for-telemetry.ts","../src/types/usage.ts","../src/util/as-array.ts","../src/util/retry-with-exponential-backoff.ts","../src/util/prepare-retries.ts","../src/generate-text/extract-text-content.ts","../src/generate-text/generated-file.ts","../src/generate-text/parse-tool-call.ts","../src/generate-text/step-result.ts","../src/generate-text/stop-condition.ts","../src/prompt/create-tool-model-output.ts","../src/generate-text/to-response-messages.ts","../src/generate-text/stream-text.ts","../src/util/prepare-headers.ts","../src/text-stream/create-text-stream-response.ts","../src/util/write-to-server-response.ts","../src/text-stream/pipe-text-stream-to-response.ts","../src/ui-message-stream/json-to-sse-transform-stream.ts","../src/ui-message-stream/ui-message-stream-headers.ts","../src/ui-message-stream/create-ui-message-stream-response.ts","../src/ui-message-stream/get-response-ui-message-id.ts","../src/ui/process-ui-message-stream.ts","../src/ui-message-stream/ui-message-chunks.ts","../src/util/merge-objects.ts","../src/util/parse-partial-json.ts","../src/util/fix-json.ts","../src/ui/ui-messages.ts","../src/ui-message-stream/handle-ui-message-stream-finish.ts","../src/ui-message-stream/pipe-ui-message-stream-to-response.ts","../src/util/async-iterable-stream.ts","../src/util/consume-stream.ts","../src/util/create-resolvable-promise.ts","../src/util/create-stitchable-stream.ts","../src/util/delayed-promise.ts","../src/util/now.ts","../src/generate-text/run-tools-transformation.ts","../src/agent/agent.ts","../src/embed/embed.ts","../src/util/split-array.ts","../src/embed/embed-many.ts","../src/generate-image/generate-image.ts","../src/generate-object/generate-object.ts","../src/generate-text/extract-reasoning-content.ts","../src/generate-object/output-strategy.ts","../src/generate-object/parse-and-validate-object-result.ts","../src/generate-object/vali
date-object-generation-input.ts","../src/generate-object/stream-object.ts","../src/util/cosine-similarity.ts","../src/util/data-url.ts","../src/util/is-deep-equal-data.ts","../src/util/serial-job-executor.ts","../src/util/simulate-readable-stream.ts","../src/error/no-speech-generated-error.ts","../src/generate-speech/generated-audio-file.ts","../src/generate-speech/generate-speech.ts","../src/generate-text/output.ts","../src/generate-text/smooth-stream.ts","../src/middleware/default-settings-middleware.ts","../src/util/get-potential-start-index.ts","../src/middleware/extract-reasoning-middleware.ts","../src/middleware/simulate-streaming-middleware.ts","../src/middleware/wrap-language-model.ts","../src/middleware/wrap-provider.ts","../src/registry/custom-provider.ts","../src/registry/no-such-provider-error.ts","../src/registry/provider-registry.ts","../src/tool/mcp/mcp-client.ts","../src/tool/mcp/mcp-sse-transport.ts","../src/tool/mcp/json-rpc-message.ts","../src/tool/mcp/types.ts","../src/tool/mcp/mcp-transport.ts","../src/error/no-transcript-generated-error.ts","../src/transcribe/transcribe.ts","../src/ui/call-completion-api.ts","../src/ui/process-text-stream.ts","../src/ui/chat.ts","../src/ui/convert-file-list-to-file-ui-parts.ts","../src/ui/default-chat-transport.ts","../src/ui/http-chat-transport.ts","../src/ui/convert-to-model-messages.ts","../src/ui/last-assistant-message-is-complete-with-tool-calls.ts","../src/ui/transform-text-to-ui-message-stream.ts","../src/ui/text-stream-chat-transport.ts","../src/ui/validate-ui-messages.ts","../src/ui-message-stream/create-ui-message-stream.ts","../src/ui-message-stream/read-ui-message-stream.ts"],"sourcesContent":["// re-exports:\nexport {\n asSchema,\n createIdGenerator,\n dynamicTool,\n generateId,\n jsonSchema,\n tool,\n zodSchema,\n type IdGenerator,\n type InferToolInput,\n type InferToolOutput,\n type Schema,\n type Tool,\n type ToolCallOptions,\n type ToolExecuteFunction,\n} from '@ai-sdk/provider-utils';\n\n// directory exports\nexport * from './agent';\nexport * from './embed';\nexport * from './error';\nexport * from './generate-image';\nexport * from './generate-object';\nexport * from './generate-speech';\nexport * from './generate-text';\nexport * from './logger';\nexport * from './middleware';\nexport * from './prompt';\nexport * from './registry';\nexport * from './text-stream';\nexport * from './tool';\nexport * from './transcribe';\nexport * from './types';\nexport * from './ui';\nexport * from './ui-message-stream';\nexport * from './util';\n\n// telemetry types:\nexport type { TelemetrySettings } from './telemetry/telemetry-settings';\n\n// import globals\nimport './global';\n","import {\n LanguageModelV2,\n LanguageModelV2Content,\n LanguageModelV2ToolCall,\n} from '@ai-sdk/provider';\nimport {\n createIdGenerator,\n executeTool,\n getErrorMessage,\n IdGenerator,\n ProviderOptions,\n} from '@ai-sdk/provider-utils';\nimport { Tracer } from '@opentelemetry/api';\nimport { NoOutputSpecifiedError } from '../error/no-output-specified-error';\nimport { logWarnings } from '../logger/log-warnings';\nimport { resolveLanguageModel } from '../model/resolve-model';\nimport { ModelMessage } from '../prompt';\nimport { CallSettings } from '../prompt/call-settings';\nimport { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';\nimport { prepareCallSettings } from '../prompt/prepare-call-settings';\nimport { prepareToolsAndToolChoice } from '../prompt/prepare-tools-and-tool-choice';\nimport { Prompt } from 
'../prompt/prompt';\nimport { standardizePrompt } from '../prompt/standardize-prompt';\nimport { wrapGatewayError } from '../prompt/wrap-gateway-error';\nimport { assembleOperationName } from '../telemetry/assemble-operation-name';\nimport { getBaseTelemetryAttributes } from '../telemetry/get-base-telemetry-attributes';\nimport { getTracer } from '../telemetry/get-tracer';\nimport { recordErrorOnSpan, recordSpan } from '../telemetry/record-span';\nimport { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes';\nimport { stringifyForTelemetry } from '../telemetry/stringify-for-telemetry';\nimport { TelemetrySettings } from '../telemetry/telemetry-settings';\nimport { LanguageModel, ToolChoice } from '../types';\nimport { addLanguageModelUsage, LanguageModelUsage } from '../types/usage';\nimport { asArray } from '../util/as-array';\nimport { DownloadFunction } from '../util/download/download-function';\nimport { prepareRetries } from '../util/prepare-retries';\nimport { ContentPart } from './content-part';\nimport { extractTextContent } from './extract-text-content';\nimport { GenerateTextResult } from './generate-text-result';\nimport { DefaultGeneratedFile } from './generated-file';\nimport { Output } from './output';\nimport { parseToolCall } from './parse-tool-call';\nimport { PrepareStepFunction } from './prepare-step';\nimport { ResponseMessage } from './response-message';\nimport { DefaultStepResult, StepResult } from './step-result';\nimport {\n isStopConditionMet,\n stepCountIs,\n StopCondition,\n} from './stop-condition';\nimport { toResponseMessages } from './to-response-messages';\nimport { TypedToolCall } from './tool-call';\nimport { ToolCallRepairFunction } from './tool-call-repair-function';\nimport { TypedToolError } from './tool-error';\nimport { ToolOutput } from './tool-output';\nimport { TypedToolResult } from './tool-result';\nimport { ToolSet } from './tool-set';\n\nconst originalGenerateId = createIdGenerator({\n prefix: 'aitxt',\n size: 24,\n});\n\n/**\nCallback that is set using the `onStepFinish` option.\n\n@param stepResult - The result of the step.\n */\nexport type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (\n stepResult: StepResult<TOOLS>,\n) => Promise<void> | void;\n\n/**\nGenerate a text and call tools for a given prompt using a language model.\n\nThis function does not stream the output. If you want to stream the output, use `streamText` instead.\n\n@param model - The language model to use.\n\n@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.\n@param toolChoice - The tool choice strategy. Default: 'auto'.\n\n@param system - A system message that will be part of the prompt.\n@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.\n@param messages - A list of messages. You can either use `prompt` or `messages` but not both.\n\n@param maxOutputTokens - Maximum number of tokens to generate.\n@param temperature - Temperature setting.\nThe value is passed through to the provider. The range depends on the provider and model.\nIt is recommended to set either `temperature` or `topP`, but not both.\n@param topP - Nucleus sampling.\nThe value is passed through to the provider. 
The range depends on the provider and model.\nIt is recommended to set either `temperature` or `topP`, but not both.\n@param topK - Only sample from the top K options for each subsequent token.\nUsed to remove \"long tail\" low probability responses.\nRecommended for advanced use cases only. You usually only need to use temperature.\n@param presencePenalty - Presence penalty setting.\nIt affects the likelihood of the model to repeat information that is already in the prompt.\nThe value is passed through to the provider. The range depends on the provider and model.\n@param frequencyPenalty - Frequency penalty setting.\nIt affects the likelihood of the model to repeatedly use the same words or phrases.\nThe value is passed through to the provider. The range depends on the provider and model.\n@param stopSequences - Stop sequences.\nIf set, the model will stop generating text when one of the stop sequences is generated.\n@param seed - The seed (integer) to use for random sampling.\nIf set and supported by the model, calls will generate deterministic results.\n\n@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.\n@param abortSignal - An optional abort signal that can be used to cancel the call.\n@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.\n\n@param experimental_generateMessageId - Generate a unique ID for each message.\n\n@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.\n\n@returns\nA result object that contains the generated text, the results of the tool calls, and additional information.\n */\nexport async function generateText<\n TOOLS extends ToolSet,\n OUTPUT = never,\n OUTPUT_PARTIAL = never,\n>({\n model: modelArg,\n tools,\n toolChoice,\n system,\n prompt,\n messages,\n maxRetries: maxRetriesArg,\n abortSignal,\n headers,\n stopWhen = stepCountIs(1),\n experimental_output: output,\n experimental_telemetry: telemetry,\n providerOptions,\n experimental_activeTools,\n activeTools = experimental_activeTools,\n experimental_prepareStep,\n prepareStep = experimental_prepareStep,\n experimental_repairToolCall: repairToolCall,\n experimental_download: download,\n experimental_context,\n _internal: {\n generateId = originalGenerateId,\n currentDate = () => new Date(),\n } = {},\n onStepFinish,\n ...settings\n}: CallSettings &\n Prompt & {\n /**\nThe language model to use.\n */\n model: LanguageModel;\n\n /**\nThe tools that the model can call. The model needs to support calling tools.\n*/\n tools?: TOOLS;\n\n /**\nThe tool choice strategy. Default: 'auto'.\n */\n toolChoice?: ToolChoice<NoInfer<TOOLS>>;\n\n /**\nCondition for stopping the generation when there are tool results in the last step.\nWhen the condition is an array, any of the conditions can be met to stop the generation.\n\n@default stepCountIs(1)\n */\n stopWhen?:\n | StopCondition<NoInfer<TOOLS>>\n | Array<StopCondition<NoInfer<TOOLS>>>;\n\n /**\nOptional telemetry configuration (experimental).\n */\n experimental_telemetry?: TelemetrySettings;\n\n /**\nAdditional provider-specific options. 
They are passed through\nto the provider from the AI SDK and enable provider-specific\nfunctionality that can be fully encapsulated in the provider.\n */\n providerOptions?: ProviderOptions;\n\n /**\n * @deprecated Use `activeTools` instead.\n */\n experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\nLimits the tools that are available for the model to call without\nchanging the tool call and result types in the result.\n */\n activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\nOptional specification for parsing structured outputs from the LLM response.\n */\n experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;\n\n /**\nCustom download function to use for URLs.\n\nBy default, files are downloaded if the model does not support the URL for the given media type.\n */\n experimental_download?: DownloadFunction | undefined;\n\n /**\n * @deprecated Use `prepareStep` instead.\n */\n experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\nOptional function that you can use to provide different settings for a step.\n */\n prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\nA function that attempts to repair a tool call that failed to parse.\n */\n experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;\n\n /**\n Callback that is called when each step (LLM call) is finished, including intermediate steps.\n */\n onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;\n\n /**\n * Context that is passed into tool execution.\n *\n * Experimental (can break in patch releases).\n *\n * @default undefined\n */\n experimental_context?: unknown;\n\n /**\n * Internal. For test use only. May change without notice.\n */\n _internal?: {\n generateId?: IdGenerator;\n currentDate?: () => Date;\n };\n }): Promise<GenerateTextResult<TOOLS, OUTPUT>> {\n const model = resolveLanguageModel(modelArg);\n const stopConditions = asArray(stopWhen);\n const { maxRetries, retry } = prepareRetries({\n maxRetries: maxRetriesArg,\n abortSignal,\n });\n\n const callSettings = prepareCallSettings(settings);\n\n const baseTelemetryAttributes = getBaseTelemetryAttributes({\n model,\n telemetry,\n headers,\n settings: { ...callSettings, maxRetries },\n });\n\n const initialPrompt = await standardizePrompt({\n system,\n prompt,\n messages,\n } as Prompt);\n\n const tracer = getTracer(telemetry);\n\n try {\n return await recordSpan({\n name: 'ai.generateText',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': model.provider,\n 'ai.model.id': model.modelId,\n // specific settings that only make sense on the outer level:\n 'ai.prompt': {\n input: () => JSON.stringify({ system, prompt, messages }),\n },\n },\n }),\n tracer,\n fn: async span => {\n const callSettings = prepareCallSettings(settings);\n\n let currentModelResponse: Awaited<\n ReturnType<LanguageModelV2['doGenerate']>\n > & { response: { id: string; timestamp: Date; modelId: string } };\n let clientToolCalls: Array<TypedToolCall<TOOLS>> = [];\n let clientToolOutputs: Array<ToolOutput<TOOLS>> = [];\n const responseMessages: Array<ResponseMessage> = [];\n const steps: GenerateTextResult<TOOLS, OUTPUT>['steps'] = [];\n\n do {\n const stepInputMessages = [\n ...initialPrompt.messages,\n ...responseMessages,\n ];\n\n const prepareStepResult = await prepareStep?.({\n model,\n steps,\n stepNumber: steps.length,\n messages: stepInputMessages,\n });\n\n const 
promptMessages = await convertToLanguageModelPrompt({\n prompt: {\n system: prepareStepResult?.system ?? initialPrompt.system,\n messages: prepareStepResult?.messages ?? stepInputMessages,\n },\n supportedUrls: await model.supportedUrls,\n download,\n });\n\n const stepModel = resolveLanguageModel(\n prepareStepResult?.model ?? model,\n );\n\n const { toolChoice: stepToolChoice, tools: stepTools } =\n prepareToolsAndToolChoice({\n tools,\n toolChoice: prepareStepResult?.toolChoice ?? toolChoice,\n activeTools: prepareStepResult?.activeTools ?? activeTools,\n });\n\n currentModelResponse = await retry(() =>\n recordSpan({\n name: 'ai.generateText.doGenerate',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText.doGenerate',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': stepModel.provider,\n 'ai.model.id': stepModel.modelId,\n // prompt:\n 'ai.prompt.messages': {\n input: () => stringifyForTelemetry(promptMessages),\n },\n 'ai.prompt.tools': {\n // convert the language model level tools:\n input: () => stepTools?.map(tool => JSON.stringify(tool)),\n },\n 'ai.prompt.toolChoice': {\n input: () =>\n stepToolChoice != null\n ? JSON.stringify(stepToolChoice)\n : undefined,\n },\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.system': stepModel.provider,\n 'gen_ai.request.model': stepModel.modelId,\n 'gen_ai.request.frequency_penalty': settings.frequencyPenalty,\n 'gen_ai.request.max_tokens': settings.maxOutputTokens,\n 'gen_ai.request.presence_penalty': settings.presencePenalty,\n 'gen_ai.request.stop_sequences': settings.stopSequences,\n 'gen_ai.request.temperature':\n settings.temperature ?? undefined,\n 'gen_ai.request.top_k': settings.topK,\n 'gen_ai.request.top_p': settings.topP,\n },\n }),\n tracer,\n fn: async span => {\n const result = await stepModel.doGenerate({\n ...callSettings,\n tools: stepTools,\n toolChoice: stepToolChoice,\n responseFormat: output?.responseFormat,\n prompt: promptMessages,\n providerOptions,\n abortSignal,\n headers,\n });\n\n // Fill in default values:\n const responseData = {\n id: result.response?.id ?? generateId(),\n timestamp: result.response?.timestamp ?? currentDate(),\n modelId: result.response?.modelId ?? stepModel.modelId,\n headers: result.response?.headers,\n body: result.response?.body,\n };\n\n // Add response information to the span:\n span.setAttributes(\n selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason': result.finishReason,\n 'ai.response.text': {\n output: () => extractTextContent(result.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(result.content);\n return toolCalls == null\n ? 
undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.id': responseData.id,\n 'ai.response.model': responseData.modelId,\n 'ai.response.timestamp':\n responseData.timestamp.toISOString(),\n 'ai.response.providerMetadata': JSON.stringify(\n result.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens': result.usage.inputTokens,\n 'ai.usage.completionTokens': result.usage.outputTokens,\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.response.finish_reasons': [result.finishReason],\n 'gen_ai.response.id': responseData.id,\n 'gen_ai.response.model': responseData.modelId,\n 'gen_ai.usage.input_tokens': result.usage.inputTokens,\n 'gen_ai.usage.output_tokens': result.usage.outputTokens,\n },\n }),\n );\n\n return { ...result, response: responseData };\n },\n }),\n );\n\n // parse tool calls:\n const stepToolCalls: TypedToolCall<TOOLS>[] = await Promise.all(\n currentModelResponse.content\n .filter(\n (part): part is LanguageModelV2ToolCall =>\n part.type === 'tool-call',\n )\n .map(toolCall =>\n parseToolCall({\n toolCall,\n tools,\n repairToolCall,\n system,\n messages: stepInputMessages,\n }),\n ),\n );\n\n // notify the tools that the tool calls are available:\n for (const toolCall of stepToolCalls) {\n if (toolCall.invalid) {\n continue; // ignore invalid tool calls\n }\n\n const tool = tools![toolCall.toolName];\n if (tool?.onInputAvailable != null) {\n await tool.onInputAvailable({\n input: toolCall.input,\n toolCallId: toolCall.toolCallId,\n messages: stepInputMessages,\n abortSignal,\n experimental_context,\n });\n }\n }\n\n // insert error tool outputs for invalid tool calls:\n // TODO AI SDK 6: invalid inputs should not require output parts\n const invalidToolCalls = stepToolCalls.filter(\n toolCall => toolCall.invalid && toolCall.dynamic,\n );\n\n clientToolOutputs = [];\n\n for (const toolCall of invalidToolCalls) {\n clientToolOutputs.push({\n type: 'tool-error',\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.input,\n error: getErrorMessage(toolCall.error!),\n dynamic: true,\n });\n }\n\n // execute client tool calls:\n clientToolCalls = stepToolCalls.filter(\n toolCall => !toolCall.providerExecuted,\n );\n\n if (tools != null) {\n clientToolOutputs.push(\n ...(await executeTools({\n toolCalls: clientToolCalls.filter(\n toolCall => !toolCall.invalid,\n ),\n tools,\n tracer,\n telemetry,\n messages: stepInputMessages,\n abortSignal,\n experimental_context,\n })),\n );\n }\n\n // content:\n const stepContent = asContent({\n content: currentModelResponse.content,\n toolCalls: stepToolCalls,\n toolOutputs: clientToolOutputs,\n });\n\n // append to messages for potential next step:\n responseMessages.push(\n ...toResponseMessages({\n content: stepContent,\n tools,\n }),\n );\n\n // Add step information (after response messages are updated):\n const currentStepResult: StepResult<TOOLS> = new DefaultStepResult({\n content: stepContent,\n finishReason: currentModelResponse.finishReason,\n usage: currentModelResponse.usage,\n warnings: currentModelResponse.warnings,\n providerMetadata: currentModelResponse.providerMetadata,\n request: currentModelResponse.request ?? {},\n response: {\n ...currentModelResponse.response,\n // deep clone msgs to avoid mutating past messages in multi-step:\n messages: structuredClone(responseMessages),\n },\n });\n\n logWarnings(currentModelResponse.warnings ?? 
[]);\n\n steps.push(currentStepResult);\n await onStepFinish?.(currentStepResult);\n } while (\n // there are tool calls:\n clientToolCalls.length > 0 &&\n // all current tool calls have outputs (incl. execution errors):\n clientToolOutputs.length === clientToolCalls.length &&\n // continue until a stop condition is met:\n !(await isStopConditionMet({ stopConditions, steps }))\n );\n\n // Add response information to the span:\n span.setAttributes(\n selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason': currentModelResponse.finishReason,\n 'ai.response.text': {\n output: () => extractTextContent(currentModelResponse.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(currentModelResponse.content);\n return toolCalls == null\n ? undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.providerMetadata': JSON.stringify(\n currentModelResponse.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens': currentModelResponse.usage.inputTokens,\n 'ai.usage.completionTokens':\n currentModelResponse.usage.outputTokens,\n },\n }),\n );\n\n const lastStep = steps[steps.length - 1];\n\n return new DefaultGenerateTextResult({\n steps,\n resolvedOutput: await output?.parseOutput(\n { text: lastStep.text },\n {\n response: lastStep.response,\n usage: lastStep.usage,\n finishReason: lastStep.finishReason,\n },\n ),\n });\n },\n });\n } catch (error) {\n throw wrapGatewayError(error);\n }\n}\n\nasync function executeTools<TOOLS extends ToolSet>({\n toolCalls,\n tools,\n tracer,\n telemetry,\n messages,\n abortSignal,\n experimental_context,\n}: {\n toolCalls: Array<TypedToolCall<TOOLS>>;\n tools: TOOLS;\n tracer: Tracer;\n telemetry: TelemetrySettings | undefined;\n messages: ModelMessage[];\n abortSignal: AbortSignal | undefined;\n experimental_context: unknown;\n}): Promise<Array<ToolOutput<TOOLS>>> {\n const toolOutputs = await Promise.all(\n toolCalls.map(async ({ toolCallId, toolName, input }) => {\n const tool = tools[toolName];\n\n if (tool?.execute == null) {\n return undefined;\n }\n\n return recordSpan({\n name: 'ai.toolCall',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.toolCall',\n telemetry,\n }),\n 'ai.toolCall.name': toolName,\n 'ai.toolCall.id': toolCallId,\n 'ai.toolCall.args': {\n output: () => JSON.stringify(input),\n },\n },\n }),\n tracer,\n fn: async span => {\n try {\n const stream = executeTool({\n execute: tool.execute!.bind(tool),\n input,\n options: {\n toolCallId,\n messages,\n abortSignal,\n experimental_context,\n },\n });\n\n let output: unknown;\n for await (const part of stream) {\n if (part.type === 'final') {\n output = part.output;\n }\n }\n try {\n span.setAttributes(\n selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.toolCall.result': {\n output: () => JSON.stringify(output),\n },\n },\n }),\n );\n } catch (ignored) {\n // JSON stringify might fail if the result is not serializable,\n // in which case we just ignore it. 
In the future we might want to\n // add an optional serialize method to the tool interface and warn\n // if the result is not serializable.\n }\n\n return {\n type: 'tool-result',\n toolCallId,\n toolName,\n input,\n output,\n dynamic: tool.type === 'dynamic',\n } as TypedToolResult<TOOLS>;\n } catch (error) {\n recordErrorOnSpan(span, error);\n return {\n type: 'tool-error',\n toolCallId,\n toolName,\n input,\n error,\n dynamic: tool.type === 'dynamic',\n } as TypedToolError<TOOLS>;\n }\n },\n });\n }),\n );\n\n return toolOutputs.filter(\n (output): output is NonNullable<typeof output> => output != null,\n );\n}\n\nclass DefaultGenerateTextResult<TOOLS extends ToolSet, OUTPUT>\n implements GenerateTextResult<TOOLS, OUTPUT>\n{\n readonly steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n\n private readonly resolvedOutput: OUTPUT;\n\n constructor(options: {\n steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n resolvedOutput: OUTPUT;\n }) {\n this.steps = options.steps;\n this.resolvedOutput = options.resolvedOutput;\n }\n\n private get finalStep() {\n return this.steps[this.steps.length - 1];\n }\n\n get content() {\n return this.finalStep.content;\n }\n\n get text() {\n return this.finalStep.text;\n }\n\n get files() {\n return this.finalStep.files;\n }\n\n get reasoningText() {\n return this.finalStep.reasoningText;\n }\n\n get reasoning() {\n return this.finalStep.reasoning;\n }\n\n get toolCalls() {\n return this.finalStep.toolCalls;\n }\n\n get staticToolCalls() {\n return this.finalStep.staticToolCalls;\n }\n\n get dynamicToolCalls() {\n return this.finalStep.dynamicToolCalls;\n }\n\n get toolResults() {\n return this.finalStep.toolResults;\n }\n\n get staticToolResults() {\n return this.finalStep.staticToolResults;\n }\n\n get dynamicToolResults() {\n return this.finalStep.dynamicToolResults;\n }\n\n get sources() {\n return this.finalStep.sources;\n }\n\n get finishReason() {\n return this.finalStep.finishReason;\n }\n\n get warnings() {\n return this.finalStep.warnings;\n }\n\n get providerMetadata() {\n return this.finalStep.providerMetadata;\n }\n\n get response() {\n return this.finalStep.response;\n }\n\n get request() {\n return this.finalStep.request;\n }\n\n get usage() {\n return this.finalStep.usage;\n }\n\n get totalUsage() {\n return this.steps.reduce(\n (totalUsage, step) => {\n return addLanguageModelUsage(totalUsage, step.usage);\n },\n {\n inputTokens: undefined,\n outputTokens: undefined,\n totalTokens: undefined,\n reasoningTokens: undefined,\n cachedInputTokens: undefined,\n } as LanguageModelUsage,\n );\n }\n\n get experimental_output() {\n if (this.resolvedOutput == null) {\n throw new NoOutputSpecifiedError();\n }\n\n return this.resolvedOutput;\n }\n}\n\nfunction asToolCalls(content: Array<LanguageModelV2Content>) {\n const parts = content.filter(\n (part): part is LanguageModelV2ToolCall => part.type === 'tool-call',\n );\n\n if (parts.length === 0) {\n return undefined;\n }\n\n return parts.map(toolCall => ({\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.input,\n }));\n}\n\nfunction asContent<TOOLS extends ToolSet>({\n content,\n toolCalls,\n toolOutputs,\n}: {\n content: Array<LanguageModelV2Content>;\n toolCalls: Array<TypedToolCall<TOOLS>>;\n toolOutputs: Array<ToolOutput<TOOLS>>;\n}): Array<ContentPart<TOOLS>> {\n return [\n ...content.map(part => {\n switch (part.type) {\n case 'text':\n case 'reasoning':\n case 'source':\n return part;\n\n case 'file': {\n return {\n type: 'file' as const,\n file: new 
DefaultGeneratedFile(part),\n };\n }\n\n case 'tool-call': {\n return toolCalls.find(\n toolCall => toolCall.toolCallId === part.toolCallId,\n )!;\n }\n\n case 'tool-result': {\n const toolCall = toolCalls.find(\n toolCall => toolCall.toolCallId === part.toolCallId,\n )!;\n\n if (toolCall == null) {\n throw new Error(`Tool call ${part.toolCallId} not found.`);\n }\n\n if (part.isError) {\n return {\n type: 'tool-error' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: toolCall.input,\n error: part.result,\n providerExecuted: true,\n dynamic: toolCall.dynamic,\n } as TypedToolError<TOOLS>;\n }\n\n return {\n type: 'tool-result' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: toolCall.input,\n output: part.result,\n providerExecuted: true,\n dynamic: toolCall.dynamic,\n } as TypedToolResult<TOOLS>;\n }\n }\n }),\n ...toolOutputs,\n ];\n}\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_NoOutputSpecifiedError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\n/**\nThrown when no output type is specified and output-related methods are called.\n */\nexport class NoOutputSpecifiedError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n constructor({ message = 'No output specified.' }: { message?: string } = {}) {\n super({ name, message });\n }\n\n static isInstance(error: unknown): error is NoOutputSpecifiedError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import {\n ImageModelV2CallWarning,\n LanguageModelV2CallWarning,\n SpeechModelV2CallWarning,\n TranscriptionModelV2CallWarning,\n} from '@ai-sdk/provider';\n\nexport type Warning =\n | LanguageModelV2CallWarning\n | ImageModelV2CallWarning\n | SpeechModelV2CallWarning\n | TranscriptionModelV2CallWarning;\n\nexport type LogWarningsFunction = (warnings: Warning[]) => void;\n\n/**\n * Formats a warning object into a human-readable string with clear AI SDK branding\n */\nfunction formatWarning(warning: Warning): string {\n const prefix = 'AI SDK Warning:';\n\n switch (warning.type) {\n case 'unsupported-setting': {\n let message = `${prefix} The \"${warning.setting}\" setting is not supported by this model`;\n if (warning.details) {\n message += ` - ${warning.details}`;\n }\n return message;\n }\n\n case 'unsupported-tool': {\n const toolName =\n 'name' in warning.tool ? 
warning.tool.name : 'unknown tool';\n let message = `${prefix} The tool \"${toolName}\" is not supported by this model`;\n if (warning.details) {\n message += ` - ${warning.details}`;\n }\n return message;\n }\n\n case 'other': {\n return `${prefix} ${warning.message}`;\n }\n\n default: {\n // Fallback for any unknown warning types\n return `${prefix} ${JSON.stringify(warning, null, 2)}`;\n }\n }\n}\n\nexport const FIRST_WARNING_INFO_MESSAGE =\n 'AI SDK Warning System: To turn off warning logging, set the AI_SDK_LOG_WARNINGS global to false.';\n\nlet hasLoggedBefore = false;\n\nexport const logWarnings: LogWarningsFunction = warnings => {\n // if the warnings array is empty, do nothing\n if (warnings.length === 0) {\n return;\n }\n\n const logger = globalThis.AI_SDK_LOG_WARNINGS;\n\n // if the logger is set to false, do nothing\n if (logger === false) {\n return;\n }\n\n // use the provided logger if it is a function\n if (typeof logger === 'function') {\n logger(warnings);\n return;\n }\n\n // display information note on first call\n if (!hasLoggedBefore) {\n hasLoggedBefore = true;\n console.info(FIRST_WARNING_INFO_MESSAGE);\n }\n\n // default behavior: log warnings to the console\n for (const warning of warnings) {\n console.warn(formatWarning(warning));\n }\n};\n\n// Reset function for testing purposes\nexport const resetLogWarningsState = () => {\n hasLoggedBefore = false;\n};\n","import { gateway } from '@ai-sdk/gateway';\nimport {\n EmbeddingModelV2,\n LanguageModelV2,\n ProviderV2,\n} from '@ai-sdk/provider';\nimport { UnsupportedModelVersionError } from '../error';\nimport { EmbeddingModel } from '../types/embedding-model';\nimport { LanguageModel } from '../types/language-model';\n\nexport function resolveLanguageModel(model: LanguageModel): LanguageModelV2 {\n if (typeof model !== 'string') {\n if (model.specificationVersion !== 'v2') {\n throw new UnsupportedModelVersionError({\n version: model.specificationVersion,\n provider: model.provider,\n modelId: model.modelId,\n });\n }\n\n return model;\n }\n\n return getGlobalProvider().languageModel(model);\n}\n\nexport function resolveEmbeddingModel<VALUE = string>(\n model: EmbeddingModel<VALUE>,\n): EmbeddingModelV2<VALUE> {\n if (typeof model !== 'string') {\n if (model.specificationVersion !== 'v2') {\n throw new UnsupportedModelVersionError({\n version: model.specificationVersion,\n provider: model.provider,\n modelId: model.modelId,\n });\n }\n\n return model;\n }\n\n // TODO AI SDK 6: figure out how to cleanly support different generic types\n return getGlobalProvider().textEmbeddingModel(\n model,\n ) as EmbeddingModelV2<VALUE>;\n}\n\nfunction getGlobalProvider(): ProviderV2 {\n return globalThis.AI_SDK_DEFAULT_PROVIDER ?? 
gateway;\n}\n","export {\n AISDKError,\n APICallError,\n EmptyResponseBodyError,\n InvalidPromptError,\n InvalidResponseDataError,\n JSONParseError,\n LoadAPIKeyError,\n NoContentGeneratedError,\n NoSuchModelError,\n TypeValidationError,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\n\nexport { InvalidArgumentError } from './invalid-argument-error';\nexport { InvalidStreamPartError } from './invalid-stream-part-error';\nexport { InvalidToolInputError } from './invalid-tool-input-error';\nexport { MCPClientError } from './mcp-client-error';\nexport { NoImageGeneratedError } from './no-image-generated-error';\nexport { NoObjectGeneratedError } from './no-object-generated-error';\nexport { NoOutputGeneratedError } from './no-output-generated-error';\nexport { NoOutputSpecifiedError } from './no-output-specified-error';\nexport { NoSuchToolError } from './no-such-tool-error';\nexport { ToolCallRepairError } from './tool-call-repair-error';\nexport { UnsupportedModelVersionError } from './unsupported-model-version-error';\n\nexport { InvalidDataContentError } from '../prompt/invalid-data-content-error';\nexport { InvalidMessageRoleError } from '../prompt/invalid-message-role-error';\nexport { MessageConversionError } from '../prompt/message-conversion-error';\nexport { DownloadError } from '../util/download/download-error';\nexport { RetryError } from '../util/retry-error';\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_InvalidArgumentError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\nexport class InvalidArgumentError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n readonly parameter: string;\n readonly value: unknown;\n\n constructor({\n parameter,\n value,\n message,\n }: {\n parameter: string;\n value: unknown;\n message: string;\n }) {\n super({\n name,\n message: `Invalid argument for parameter ${parameter}: ${message}`,\n });\n\n this.parameter = parameter;\n this.value = value;\n }\n\n static isInstance(error: unknown): error is InvalidArgumentError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { AISDKError } from '@ai-sdk/provider';\nimport { SingleRequestTextStreamPart } from '../generate-text/run-tools-transformation';\n\nconst name = 'AI_InvalidStreamPartError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\nexport class InvalidStreamPartError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n readonly chunk: SingleRequestTextStreamPart<any>;\n\n constructor({\n chunk,\n message,\n }: {\n chunk: SingleRequestTextStreamPart<any>;\n message: string;\n }) {\n super({ name, message });\n\n this.chunk = chunk;\n }\n\n static isInstance(error: unknown): error is InvalidStreamPartError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { AISDKError, getErrorMessage } from '@ai-sdk/provider';\n\nconst name = 'AI_InvalidToolInputError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\nexport class InvalidToolInputError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n readonly toolName: string;\n readonly toolInput: string;\n\n constructor({\n toolInput,\n toolName,\n cause,\n message = `Invalid input for tool ${toolName}: ${getErrorMessage(cause)}`,\n }: {\n message?: string;\n toolInput: string;\n toolName: string;\n cause: unknown;\n }) {\n super({ name, message, cause });\n\n this.toolInput = toolInput;\n 
this.toolName = toolName;\n }\n\n static isInstance(error: unknown): error is InvalidToolInputError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_MCPClientError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\n/**\n * An error occurred with the MCP client.\n */\nexport class MCPClientError extends AISDKError {\n private readonly [symbol] = true;\n\n constructor({\n name = 'MCPClientError',\n message,\n cause,\n }: {\n name?: string;\n message: string;\n cause?: unknown;\n }) {\n super({ name, message, cause });\n }\n\n static isInstance(error: unknown): error is MCPClientError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { AISDKError } from '@ai-sdk/provider';\nimport { ImageModelResponseMetadata } from '../types/image-model-response-metadata';\n\nconst name = 'AI_NoImageGeneratedError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\n/**\nThrown when no image could be generated. This can have multiple causes:\n\n- The model failed to generate a response.\n- The model generated a response that could not be parsed.\n */\nexport class NoImageGeneratedError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n /**\nThe response metadata for each call.\n */\n readonly responses: Array<ImageModelResponseMetadata> | undefined;\n\n constructor({\n message = 'No image generated.',\n cause,\n responses,\n }: {\n message?: string;\n cause?: Error;\n responses?: Array<ImageModelResponseMetadata>;\n }) {\n super({ name, message, cause });\n\n this.responses = responses;\n }\n\n static isInstance(error: unknown): error is NoImageGeneratedError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { AISDKError } from '@ai-sdk/provider';\nimport { FinishReason } from '../types/language-model';\nimport { LanguageModelResponseMetadata } from '../types/language-model-response-metadata';\nimport { LanguageModelUsage } from '../types/usage';\n\nconst name = 'AI_NoObjectGeneratedError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\n/**\nThrown when no object could be generated. This can have several causes:\n\n- The model failed to generate a response.\n- The model generated a response that could not be parsed.\n- The model generated a response that could not be validated against the schema.\n\nThe error contains the following properties:\n\n- `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.\n */\nexport class NoObjectGeneratedError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n /**\n The text that was generated by the model. 
This can be the raw text or the tool call text, depending on the model.\n */\n readonly text: string | undefined;\n\n /**\n The response metadata.\n */\n readonly response: LanguageModelResponseMetadata | undefined;\n\n /**\n The usage of the model.\n */\n readonly usage: LanguageModelUsage | undefined;\n\n /**\n Reason why the model finished generating a response.\n */\n readonly finishReason: FinishReason | undefined;\n\n constructor({\n message = 'No object generated.',\n cause,\n text,\n response,\n usage,\n finishReason,\n }: {\n message?: string;\n cause?: Error;\n text?: string;\n response: LanguageModelResponseMetadata;\n usage: LanguageModelUsage;\n finishReason: FinishReason;\n }) {\n super({ name, message, cause });\n\n this.text = text;\n this.response = response;\n this.usage = usage;\n this.finishReason = finishReason;\n }\n\n static isInstance(error: unknown): error is NoObjectGeneratedError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_NoOutputGeneratedError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\n/**\nThrown when no LLM output was generated, e.g. because of errors.\n */\nexport class NoOutputGeneratedError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n constructor({\n message = 'No output generated.',\n cause,\n }: {\n message?: string;\n cause?: Error;\n } = {}) {\n super({ name, message, cause });\n }\n\n static isInstance(error: unknown): error is NoOutputGeneratedError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_NoSuchToolError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\nexport class NoSuchToolError extends AISDKError
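
The doc comment embedded above describes the call surface of generateText: a model, optional tools and toolChoice, a prompt or messages, a stopWhen condition (default stepCountIs(1)), and an onStepFinish callback invoked after every step. A minimal sketch of a call, assuming Zod for the tool input schema; the model id is illustrative (string ids resolve through the global provider, AI_SDK_DEFAULT_PROVIDER, falling back to the gateway, per resolve-model.ts above):

import { generateText, tool, stepCountIs } from 'ai';
import { z } from 'zod';

const result = await generateText({
  // a string id resolves via the global provider; this id is an
  // illustrative assumption, not prescribed by this file
  model: 'openai/gpt-4o',
  tools: {
    weather: tool({
      description: 'Get the weather for a city.',
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperature: 21 }),
    }),
  },
  stopWhen: stepCountIs(5), // allow up to 5 steps; the default is stepCountIs(1)
  prompt: 'What is the weather in Berlin?',
  onStepFinish: step => {
    console.log(step.finishReason, step.usage);
  },
});

console.log(result.text);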
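
The embedded logger/log-warnings.ts shows that warning output is governed by the AI_SDK_LOG_WARNINGS global: false silences it, a function receives the warnings array, and otherwise warnings are formatted and written to console.warn (with a one-time info note). A sketch of both options; myTelemetry is a hypothetical sink:

// silence warning logging entirely:
globalThis.AI_SDK_LOG_WARNINGS = false;

// ...or route warnings to a custom handler instead of console.warn:
globalThis.AI_SDK_LOG_WARNINGS = warnings => {
  for (const warning of warnings) {
    myTelemetry.record('ai-sdk-warning', warning); // hypothetical sink
  }
};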
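
Each error class in the embedded sources registers a Symbol.for marker and exposes a static isInstance guard, which narrows AI SDK errors reliably even when instanceof fails across duplicated package copies. A sketch using NoObjectGeneratedError, whose text, usage, and finishReason fields are defined above; the generateObject call itself is an assumption based on the exported generate-object module:

import { generateObject, NoObjectGeneratedError } from 'ai';
import { z } from 'zod';

try {
  const { object } = await generateObject({
    model: 'openai/gpt-4o', // illustrative id
    schema: z.object({ title: z.string() }),
    prompt: 'Suggest a title.',
  });
  console.log(object.title);
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    // typed accessors from the class definition above:
    console.error(error.text, error.finishReason, error.usage);
  }
}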