ai
AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript
Source Map (JSON)
{"version":3,"sources":["../src/index.ts","../src/generate-text/generate-text.ts","../src/error/no-output-specified-error.ts","../src/util/as-array.ts","../src/error/invalid-argument-error.ts","../src/util/retry-with-exponential-backoff.ts","../src/util/retry-error.ts","../src/util/prepare-retries.ts","../src/prompt/convert-to-language-model-prompt.ts","../src/util/detect-media-type.ts","../src/util/download-error.ts","../src/util/download.ts","../src/prompt/data-content.ts","../src/prompt/invalid-data-content-error.ts","../src/prompt/split-data-url.ts","../src/prompt/invalid-message-role-error.ts","../src/prompt/prepare-call-settings.ts","../src/prompt/prepare-tools-and-tool-choice.ts","../src/util/is-non-empty-object.ts","../src/prompt/resolve-language-model.ts","../src/error/index.ts","../src/error/invalid-stream-part-error.ts","../src/error/invalid-tool-input-error.ts","../src/error/mcp-client-error.ts","../src/error/no-image-generated-error.ts","../src/error/no-object-generated-error.ts","../src/error/no-such-tool-error.ts","../src/error/tool-call-repair-error.ts","../src/error/unsupported-model-version-error.ts","../src/prompt/message-conversion-error.ts","../src/prompt/standardize-prompt.ts","../src/prompt/message.ts","../src/types/provider-metadata.ts","../src/types/json-value.ts","../src/prompt/content-part.ts","../src/prompt/wrap-gateway-error.ts","../src/telemetry/assemble-operation-name.ts","../src/telemetry/get-base-telemetry-attributes.ts","../src/telemetry/get-tracer.ts","../src/telemetry/noop-tracer.ts","../src/telemetry/record-span.ts","../src/telemetry/select-telemetry-attributes.ts","../src/telemetry/stringify-for-telemetry.ts","../src/types/usage.ts","../src/generate-text/extract-content-text.ts","../src/generate-text/generated-file.ts","../src/generate-text/parse-tool-call.ts","../src/generate-text/step-result.ts","../src/generate-text/stop-condition.ts","../src/prompt/create-tool-model-output.ts","../src/generate-text/to-response-messages.ts","../src/generate-text/stream-text.ts","../src/util/prepare-headers.ts","../src/text-stream/create-text-stream-response.ts","../src/util/write-to-server-response.ts","../src/text-stream/pipe-text-stream-to-response.ts","../src/ui-message-stream/json-to-sse-transform-stream.ts","../src/ui-message-stream/ui-message-stream-headers.ts","../src/ui-message-stream/create-ui-message-stream-response.ts","../src/ui-message-stream/get-response-ui-message-id.ts","../src/ui/process-ui-message-stream.ts","../src/ui-message-stream/ui-message-chunks.ts","../src/util/merge-objects.ts","../src/util/parse-partial-json.ts","../src/util/fix-json.ts","../src/ui/ui-messages.ts","../src/ui-message-stream/handle-ui-message-stream-finish.ts","../src/ui-message-stream/pipe-ui-message-stream-to-response.ts","../src/util/async-iterable-stream.ts","../src/util/consume-stream.ts","../src/util/create-resolvable-promise.ts","../src/util/create-stitchable-stream.ts","../src/util/delayed-promise.ts","../src/util/now.ts","../src/util/filter-stream-errors.ts","../src/generate-text/run-tools-transformation.ts","../src/agent/agent.ts","../src/embed/embed.ts","../src/util/split-array.ts","../src/embed/embed-many.ts","../src/generate-image/generate-image.ts","../src/generate-object/generate-object.ts","../src/generate-object/output-strategy.ts","../src/generate-object/validate-object-generation-input.ts","../src/generate-object/parse-and-validate-object-result.ts","../src/generate-object/stream-object.ts","../src/util/cosine-similarity.ts","../src/util/data-url.ts","../sr
c/util/is-deep-equal-data.ts","../src/util/serial-job-executor.ts","../src/util/simulate-readable-stream.ts","../src/error/no-speech-generated-error.ts","../src/generate-speech/generated-audio-file.ts","../src/generate-speech/generate-speech.ts","../src/generate-text/output.ts","../src/generate-text/smooth-stream.ts","../src/middleware/default-settings-middleware.ts","../src/util/get-potential-start-index.ts","../src/middleware/extract-reasoning-middleware.ts","../src/middleware/simulate-streaming-middleware.ts","../src/middleware/wrap-language-model.ts","../src/middleware/wrap-provider.ts","../src/registry/custom-provider.ts","../src/registry/no-such-provider-error.ts","../src/registry/provider-registry.ts","../src/tool/mcp/mcp-client.ts","../src/tool/mcp/mcp-sse-transport.ts","../src/tool/mcp/json-rpc-message.ts","../src/tool/mcp/types.ts","../src/tool/mcp/mcp-transport.ts","../src/error/no-transcript-generated-error.ts","../src/transcribe/transcribe.ts","../src/ui/call-completion-api.ts","../src/ui/process-text-stream.ts","../src/ui/chat.ts","../src/ui/convert-file-list-to-file-ui-parts.ts","../src/ui/default-chat-transport.ts","../src/ui/http-chat-transport.ts","../src/ui/convert-to-model-messages.ts","../src/ui/last-assistant-message-is-complete-with-tool-calls.ts","../src/ui/transform-text-to-ui-message-stream.ts","../src/ui/text-stream-chat-transport.ts","../src/ui-message-stream/create-ui-message-stream.ts","../src/ui-message-stream/read-ui-message-stream.ts"],"sourcesContent":["// re-exports:\nexport {\n asSchema,\n createIdGenerator,\n dynamicTool,\n generateId,\n jsonSchema,\n tool,\n zodSchema,\n type IdGenerator,\n type InferToolInput,\n type InferToolOutput,\n type Schema,\n type Tool,\n type ToolCallOptions,\n type ToolExecuteFunction,\n} from '@ai-sdk/provider-utils';\n\n// directory exports\nexport * from './agent';\nexport * from './embed';\nexport * from './error';\nexport * from './generate-image';\nexport * from './generate-object';\nexport * from './generate-speech';\nexport * from './generate-text';\nexport * from './middleware';\nexport * from './prompt';\nexport * from './registry';\nexport * from './text-stream';\nexport * from './tool';\nexport * from './transcribe';\nexport * from './types';\nexport * from './ui';\nexport * from './ui-message-stream';\nexport * from './util';\n\n// telemetry types:\nexport type { TelemetrySettings } from './telemetry/telemetry-settings';\n\n// import globals\nimport './global';\n","import {\n LanguageModelV2,\n LanguageModelV2Content,\n LanguageModelV2ToolCall,\n} from '@ai-sdk/provider';\nimport {\n createIdGenerator,\n IdGenerator,\n ProviderOptions,\n} from '@ai-sdk/provider-utils';\nimport { Tracer } from '@opentelemetry/api';\nimport { NoOutputSpecifiedError } from '../error/no-output-specified-error';\nimport { asArray } from '../util/as-array';\nimport { prepareRetries } from '../util/prepare-retries';\nimport { ModelMessage } from '../prompt';\nimport { CallSettings } from '../prompt/call-settings';\nimport { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';\nimport { prepareCallSettings } from '../prompt/prepare-call-settings';\nimport { prepareToolsAndToolChoice } from '../prompt/prepare-tools-and-tool-choice';\nimport { Prompt } from '../prompt/prompt';\nimport { resolveLanguageModel } from '../prompt/resolve-language-model';\nimport { standardizePrompt } from '../prompt/standardize-prompt';\nimport { wrapGatewayError } from '../prompt/wrap-gateway-error';\nimport { 
assembleOperationName } from '../telemetry/assemble-operation-name';\nimport { getBaseTelemetryAttributes } from '../telemetry/get-base-telemetry-attributes';\nimport { getTracer } from '../telemetry/get-tracer';\nimport { recordErrorOnSpan, recordSpan } from '../telemetry/record-span';\nimport { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes';\nimport { stringifyForTelemetry } from '../telemetry/stringify-for-telemetry';\nimport { TelemetrySettings } from '../telemetry/telemetry-settings';\nimport { LanguageModel, ToolChoice } from '../types';\nimport { addLanguageModelUsage, LanguageModelUsage } from '../types/usage';\nimport { ContentPart } from './content-part';\nimport { extractContentText } from './extract-content-text';\nimport { GenerateTextResult } from './generate-text-result';\nimport { DefaultGeneratedFile } from './generated-file';\nimport { Output } from './output';\nimport { parseToolCall } from './parse-tool-call';\nimport { PrepareStepFunction } from './prepare-step';\nimport { ResponseMessage } from './response-message';\nimport { DefaultStepResult, StepResult } from './step-result';\nimport {\n isStopConditionMet,\n stepCountIs,\n StopCondition,\n} from './stop-condition';\nimport { toResponseMessages } from './to-response-messages';\nimport { TypedToolCall } from './tool-call';\nimport { ToolCallRepairFunction } from './tool-call-repair-function';\nimport { ToolOutput } from './tool-output';\nimport { ToolSet } from './tool-set';\nimport { TypedToolResult } from './tool-result';\nimport { TypedToolError } from './tool-error';\n\nconst originalGenerateId = createIdGenerator({\n prefix: 'aitxt',\n size: 24,\n});\n\n/**\nCallback that is set using the `onStepFinish` option.\n\n@param stepResult - The result of the step.\n */\nexport type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (\n stepResult: StepResult<TOOLS>,\n) => Promise<void> | void;\n\n/**\nGenerate a text and call tools for a given prompt using a language model.\n\nThis function does not stream the output. If you want to stream the output, use `streamText` instead.\n\n@param model - The language model to use.\n\n@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.\n@param toolChoice - The tool choice strategy. Default: 'auto'.\n\n@param system - A system message that will be part of the prompt.\n@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.\n@param messages - A list of messages. You can either use `prompt` or `messages` but not both.\n\n@param maxOutputTokens - Maximum number of tokens to generate.\n@param temperature - Temperature setting.\nThe value is passed through to the provider. The range depends on the provider and model.\nIt is recommended to set either `temperature` or `topP`, but not both.\n@param topP - Nucleus sampling.\nThe value is passed through to the provider. The range depends on the provider and model.\nIt is recommended to set either `temperature` or `topP`, but not both.\n@param topK - Only sample from the top K options for each subsequent token.\nUsed to remove \"long tail\" low probability responses.\nRecommended for advanced use cases only. You usually only need to use temperature.\n@param presencePenalty - Presence penalty setting.\nIt affects the likelihood of the model to repeat information that is already in the prompt.\nThe value is passed through to the provider. 
The range depends on the provider and model.\n@param frequencyPenalty - Frequency penalty setting.\nIt affects the likelihood of the model to repeatedly use the same words or phrases.\nThe value is passed through to the provider. The range depends on the provider and model.\n@param stopSequences - Stop sequences.\nIf set, the model will stop generating text when one of the stop sequences is generated.\n@param seed - The seed (integer) to use for random sampling.\nIf set and supported by the model, calls will generate deterministic results.\n\n@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.\n@param abortSignal - An optional abort signal that can be used to cancel the call.\n@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.\n\n@param experimental_generateMessageId - Generate a unique ID for each message.\n\n@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.\n\n@returns\nA result object that contains the generated text, the results of the tool calls, and additional information.\n */\nexport async function generateText<\n TOOLS extends ToolSet,\n OUTPUT = never,\n OUTPUT_PARTIAL = never,\n>({\n model: modelArg,\n tools,\n toolChoice,\n system,\n prompt,\n messages,\n maxRetries: maxRetriesArg,\n abortSignal,\n headers,\n stopWhen = stepCountIs(1),\n experimental_output: output,\n experimental_telemetry: telemetry,\n providerOptions,\n experimental_activeTools,\n activeTools = experimental_activeTools,\n experimental_prepareStep,\n prepareStep = experimental_prepareStep,\n experimental_repairToolCall: repairToolCall,\n _internal: {\n generateId = originalGenerateId,\n currentDate = () => new Date(),\n } = {},\n onStepFinish,\n ...settings\n}: CallSettings &\n Prompt & {\n /**\nThe language model to use.\n */\n model: LanguageModel;\n\n /**\nThe tools that the model can call. The model needs to support calling tools.\n*/\n tools?: TOOLS;\n\n /**\nThe tool choice strategy. Default: 'auto'.\n */\n toolChoice?: ToolChoice<NoInfer<TOOLS>>;\n\n /**\nCondition for stopping the generation when there are tool results in the last step.\nWhen the condition is an array, any of the conditions can be met to stop the generation.\n\n@default stepCountIs(1)\n */\n stopWhen?:\n | StopCondition<NoInfer<TOOLS>>\n | Array<StopCondition<NoInfer<TOOLS>>>;\n\n /**\nOptional telemetry configuration (experimental).\n */\n experimental_telemetry?: TelemetrySettings;\n\n /**\nAdditional provider-specific options. 
They are passed through\nto the provider from the AI SDK and enable provider-specific\nfunctionality that can be fully encapsulated in the provider.\n */\n providerOptions?: ProviderOptions;\n\n /**\n * @deprecated Use `activeTools` instead.\n */\n experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\nLimits the tools that are available for the model to call without\nchanging the tool call and result types in the result.\n */\n activeTools?: Array<keyof NoInfer<TOOLS>>;\n\n /**\nOptional specification for parsing structured outputs from the LLM response.\n */\n experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;\n\n /**\n * @deprecated Use `prepareStep` instead.\n */\n experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\nOptional function that you can use to provide different settings for a step.\n */\n prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;\n\n /**\nA function that attempts to repair a tool call that failed to parse.\n */\n experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;\n\n /**\n Callback that is called when each step (LLM call) is finished, including intermediate steps.\n */\n onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;\n\n /**\n * Internal. For test use only. May change without notice.\n */\n _internal?: {\n generateId?: IdGenerator;\n currentDate?: () => Date;\n };\n }): Promise<GenerateTextResult<TOOLS, OUTPUT>> {\n const model = resolveLanguageModel(modelArg);\n const stopConditions = asArray(stopWhen);\n const { maxRetries, retry } = prepareRetries({\n maxRetries: maxRetriesArg,\n abortSignal,\n });\n\n const callSettings = prepareCallSettings(settings);\n\n const baseTelemetryAttributes = getBaseTelemetryAttributes({\n model,\n telemetry,\n headers,\n settings: { ...callSettings, maxRetries },\n });\n\n const initialPrompt = await standardizePrompt({\n system,\n prompt,\n messages,\n });\n\n const tracer = getTracer(telemetry);\n\n try {\n return await recordSpan({\n name: 'ai.generateText',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': model.provider,\n 'ai.model.id': model.modelId,\n // specific settings that only make sense on the outer level:\n 'ai.prompt': {\n input: () => JSON.stringify({ system, prompt, messages }),\n },\n },\n }),\n tracer,\n fn: async span => {\n const callSettings = prepareCallSettings(settings);\n\n let currentModelResponse: Awaited<\n ReturnType<LanguageModelV2['doGenerate']>\n > & { response: { id: string; timestamp: Date; modelId: string } };\n let clientToolCalls: Array<TypedToolCall<TOOLS>> = [];\n let clientToolOutputs: Array<ToolOutput<TOOLS>> = [];\n const responseMessages: Array<ResponseMessage> = [];\n const steps: GenerateTextResult<TOOLS, OUTPUT>['steps'] = [];\n\n do {\n const stepInputMessages = [\n ...initialPrompt.messages,\n ...responseMessages,\n ];\n\n const prepareStepResult = await prepareStep?.({\n model,\n steps,\n stepNumber: steps.length,\n messages: stepInputMessages,\n });\n\n const promptMessages = await convertToLanguageModelPrompt({\n prompt: {\n system: prepareStepResult?.system ?? initialPrompt.system,\n messages: prepareStepResult?.messages ?? stepInputMessages,\n },\n supportedUrls: await model.supportedUrls,\n });\n\n const stepModel = resolveLanguageModel(\n prepareStepResult?.model ?? 
model,\n );\n\n const { toolChoice: stepToolChoice, tools: stepTools } =\n prepareToolsAndToolChoice({\n tools,\n toolChoice: prepareStepResult?.toolChoice ?? toolChoice,\n activeTools: prepareStepResult?.activeTools ?? activeTools,\n });\n\n currentModelResponse = await retry(() =>\n recordSpan({\n name: 'ai.generateText.doGenerate',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.generateText.doGenerate',\n telemetry,\n }),\n ...baseTelemetryAttributes,\n // model:\n 'ai.model.provider': stepModel.provider,\n 'ai.model.id': stepModel.modelId,\n // prompt:\n 'ai.prompt.messages': {\n input: () => stringifyForTelemetry(promptMessages),\n },\n 'ai.prompt.tools': {\n // convert the language model level tools:\n input: () => stepTools?.map(tool => JSON.stringify(tool)),\n },\n 'ai.prompt.toolChoice': {\n input: () =>\n stepToolChoice != null\n ? JSON.stringify(stepToolChoice)\n : undefined,\n },\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.system': stepModel.provider,\n 'gen_ai.request.model': stepModel.modelId,\n 'gen_ai.request.frequency_penalty': settings.frequencyPenalty,\n 'gen_ai.request.max_tokens': settings.maxOutputTokens,\n 'gen_ai.request.presence_penalty': settings.presencePenalty,\n 'gen_ai.request.stop_sequences': settings.stopSequences,\n 'gen_ai.request.temperature':\n settings.temperature ?? undefined,\n 'gen_ai.request.top_k': settings.topK,\n 'gen_ai.request.top_p': settings.topP,\n },\n }),\n tracer,\n fn: async span => {\n const result = await stepModel.doGenerate({\n ...callSettings,\n tools: stepTools,\n toolChoice: stepToolChoice,\n responseFormat: output?.responseFormat,\n prompt: promptMessages,\n providerOptions,\n abortSignal,\n headers,\n });\n\n // Fill in default values:\n const responseData = {\n id: result.response?.id ?? generateId(),\n timestamp: result.response?.timestamp ?? currentDate(),\n modelId: result.response?.modelId ?? stepModel.modelId,\n headers: result.response?.headers,\n body: result.response?.body,\n };\n\n // Add response information to the span:\n span.setAttributes(\n selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason': result.finishReason,\n 'ai.response.text': {\n output: () => extractContentText(result.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(result.content);\n return toolCalls == null\n ? 
undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.id': responseData.id,\n 'ai.response.model': responseData.modelId,\n 'ai.response.timestamp':\n responseData.timestamp.toISOString(),\n 'ai.response.providerMetadata': JSON.stringify(\n result.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens': result.usage.inputTokens,\n 'ai.usage.completionTokens': result.usage.outputTokens,\n\n // standardized gen-ai llm span attributes:\n 'gen_ai.response.finish_reasons': [result.finishReason],\n 'gen_ai.response.id': responseData.id,\n 'gen_ai.response.model': responseData.modelId,\n 'gen_ai.usage.input_tokens': result.usage.inputTokens,\n 'gen_ai.usage.output_tokens': result.usage.outputTokens,\n },\n }),\n );\n\n return { ...result, response: responseData };\n },\n }),\n );\n\n // parse tool calls:\n const stepToolCalls = await Promise.all(\n currentModelResponse.content\n .filter(\n (part): part is LanguageModelV2ToolCall =>\n part.type === 'tool-call',\n )\n .map(toolCall =>\n parseToolCall({\n toolCall,\n tools,\n repairToolCall,\n system,\n messages: stepInputMessages,\n }),\n ),\n );\n\n // notify the tools that the tool calls are available:\n for (const toolCall of stepToolCalls) {\n const tool = tools![toolCall.toolName];\n if (tool?.onInputAvailable != null) {\n await tool.onInputAvailable({\n input: toolCall.input,\n toolCallId: toolCall.toolCallId,\n messages: stepInputMessages,\n abortSignal,\n });\n }\n }\n\n clientToolCalls = stepToolCalls.filter(\n toolCall => toolCall.providerExecuted !== true,\n );\n\n // execute tools:\n clientToolOutputs =\n tools == null\n ? []\n : await executeTools({\n toolCalls: clientToolCalls,\n tools,\n tracer,\n telemetry,\n messages: stepInputMessages,\n abortSignal,\n });\n\n // content:\n const stepContent = asContent({\n content: currentModelResponse.content,\n toolCalls: stepToolCalls,\n toolOutputs: clientToolOutputs,\n });\n\n // append to messages for potential next step:\n responseMessages.push(\n ...toResponseMessages({\n content: stepContent,\n tools,\n }),\n );\n\n // Add step information (after response messages are updated):\n const currentStepResult: StepResult<TOOLS> = new DefaultStepResult({\n content: stepContent,\n finishReason: currentModelResponse.finishReason,\n usage: currentModelResponse.usage,\n warnings: currentModelResponse.warnings,\n providerMetadata: currentModelResponse.providerMetadata,\n request: currentModelResponse.request ?? {},\n response: {\n ...currentModelResponse.response,\n // deep clone msgs to avoid mutating past messages in multi-step:\n messages: structuredClone(responseMessages),\n },\n });\n\n steps.push(currentStepResult);\n await onStepFinish?.(currentStepResult);\n } while (\n // there are tool calls:\n clientToolCalls.length > 0 &&\n // all current tool calls have outputs (incl. execution errors):\n clientToolOutputs.length === clientToolCalls.length &&\n // continue until a stop condition is met:\n !(await isStopConditionMet({ stopConditions, steps }))\n );\n\n // Add response information to the span:\n span.setAttributes(\n selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.response.finishReason': currentModelResponse.finishReason,\n 'ai.response.text': {\n output: () => extractContentText(currentModelResponse.content),\n },\n 'ai.response.toolCalls': {\n output: () => {\n const toolCalls = asToolCalls(currentModelResponse.content);\n return toolCalls == null\n ? 
undefined\n : JSON.stringify(toolCalls);\n },\n },\n 'ai.response.providerMetadata': JSON.stringify(\n currentModelResponse.providerMetadata,\n ),\n\n // TODO rename telemetry attributes to inputTokens and outputTokens\n 'ai.usage.promptTokens': currentModelResponse.usage.inputTokens,\n 'ai.usage.completionTokens':\n currentModelResponse.usage.outputTokens,\n },\n }),\n );\n\n const lastStep = steps[steps.length - 1];\n\n return new DefaultGenerateTextResult({\n steps,\n resolvedOutput: await output?.parseOutput(\n { text: lastStep.text },\n {\n response: lastStep.response,\n usage: lastStep.usage,\n finishReason: lastStep.finishReason,\n },\n ),\n });\n },\n });\n } catch (error) {\n throw wrapGatewayError(error);\n }\n}\n\nasync function executeTools<TOOLS extends ToolSet>({\n toolCalls,\n tools,\n tracer,\n telemetry,\n messages,\n abortSignal,\n}: {\n toolCalls: Array<TypedToolCall<TOOLS>>;\n tools: TOOLS;\n tracer: Tracer;\n telemetry: TelemetrySettings | undefined;\n messages: ModelMessage[];\n abortSignal: AbortSignal | undefined;\n}): Promise<Array<ToolOutput<TOOLS>>> {\n const toolOutputs = await Promise.all(\n toolCalls.map(async ({ toolCallId, toolName, input }) => {\n const tool = tools[toolName];\n\n if (tool?.execute == null) {\n return undefined;\n }\n\n return recordSpan({\n name: 'ai.toolCall',\n attributes: selectTelemetryAttributes({\n telemetry,\n attributes: {\n ...assembleOperationName({\n operationId: 'ai.toolCall',\n telemetry,\n }),\n 'ai.toolCall.name': toolName,\n 'ai.toolCall.id': toolCallId,\n 'ai.toolCall.args': {\n output: () => JSON.stringify(input),\n },\n },\n }),\n tracer,\n fn: async span => {\n try {\n const output = await tool.execute!(input, {\n toolCallId,\n messages,\n abortSignal,\n });\n\n try {\n span.setAttributes(\n selectTelemetryAttributes({\n telemetry,\n attributes: {\n 'ai.toolCall.result': {\n output: () => JSON.stringify(output),\n },\n },\n }),\n );\n } catch (ignored) {\n // JSON stringify might fail if the result is not serializable,\n // in which case we just ignore it. 
In the future we might want to\n // add an optional serialize method to the tool interface and warn\n // if the result is not serializable.\n }\n\n return {\n type: 'tool-result',\n toolCallId,\n toolName,\n input,\n output,\n dynamic: tool.type === 'dynamic',\n } as TypedToolResult<TOOLS>;\n } catch (error) {\n recordErrorOnSpan(span, error);\n return {\n type: 'tool-error',\n toolCallId,\n toolName,\n input,\n error,\n dynamic: tool.type === 'dynamic',\n } as TypedToolError<TOOLS>;\n }\n },\n });\n }),\n );\n\n return toolOutputs.filter(\n (output): output is NonNullable<typeof output> => output != null,\n );\n}\n\nclass DefaultGenerateTextResult<TOOLS extends ToolSet, OUTPUT>\n implements GenerateTextResult<TOOLS, OUTPUT>\n{\n readonly steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n\n private readonly resolvedOutput: OUTPUT;\n\n constructor(options: {\n steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];\n resolvedOutput: OUTPUT;\n }) {\n this.steps = options.steps;\n this.resolvedOutput = options.resolvedOutput;\n }\n\n private get finalStep() {\n return this.steps[this.steps.length - 1];\n }\n\n get content() {\n return this.finalStep.content;\n }\n\n get text() {\n return this.finalStep.text;\n }\n\n get files() {\n return this.finalStep.files;\n }\n\n get reasoningText() {\n return this.finalStep.reasoningText;\n }\n\n get reasoning() {\n return this.finalStep.reasoning;\n }\n\n get toolCalls() {\n return this.finalStep.toolCalls;\n }\n\n get staticToolCalls() {\n return this.finalStep.staticToolCalls;\n }\n\n get dynamicToolCalls() {\n return this.finalStep.dynamicToolCalls;\n }\n\n get toolResults() {\n return this.finalStep.toolResults;\n }\n\n get staticToolResults() {\n return this.finalStep.staticToolResults;\n }\n\n get dynamicToolResults() {\n return this.finalStep.dynamicToolResults;\n }\n\n get sources() {\n return this.finalStep.sources;\n }\n\n get finishReason() {\n return this.finalStep.finishReason;\n }\n\n get warnings() {\n return this.finalStep.warnings;\n }\n\n get providerMetadata() {\n return this.finalStep.providerMetadata;\n }\n\n get response() {\n return this.finalStep.response;\n }\n\n get request() {\n return this.finalStep.request;\n }\n\n get usage() {\n return this.finalStep.usage;\n }\n\n get totalUsage() {\n return this.steps.reduce(\n (totalUsage, step) => {\n return addLanguageModelUsage(totalUsage, step.usage);\n },\n {\n inputTokens: undefined,\n outputTokens: undefined,\n totalTokens: undefined,\n reasoningTokens: undefined,\n cachedInputTokens: undefined,\n } as LanguageModelUsage,\n );\n }\n\n get experimental_output() {\n if (this.resolvedOutput == null) {\n throw new NoOutputSpecifiedError();\n }\n\n return this.resolvedOutput;\n }\n}\n\nfunction asToolCalls(content: Array<LanguageModelV2Content>) {\n const parts = content.filter(\n (part): part is LanguageModelV2ToolCall => part.type === 'tool-call',\n );\n\n if (parts.length === 0) {\n return undefined;\n }\n\n return parts.map(toolCall => ({\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.input,\n }));\n}\n\nfunction asContent<TOOLS extends ToolSet>({\n content,\n toolCalls,\n toolOutputs,\n}: {\n content: Array<LanguageModelV2Content>;\n toolCalls: Array<TypedToolCall<TOOLS>>;\n toolOutputs: Array<ToolOutput<TOOLS>>;\n}): Array<ContentPart<TOOLS>> {\n return [\n ...content.map(part => {\n switch (part.type) {\n case 'text':\n case 'reasoning':\n case 'source':\n return part;\n\n case 'file': {\n return {\n type: 'file' as const,\n file: new 
DefaultGeneratedFile(part),\n };\n }\n\n case 'tool-call': {\n return toolCalls.find(\n toolCall => toolCall.toolCallId === part.toolCallId,\n )!;\n }\n\n case 'tool-result': {\n const toolCall = toolCalls.find(\n toolCall => toolCall.toolCallId === part.toolCallId,\n )!;\n\n if (toolCall == null) {\n throw new Error(`Tool call ${part.toolCallId} not found.`);\n }\n\n if (part.isError) {\n return {\n type: 'tool-error' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: toolCall.input,\n error: part.result,\n providerExecuted: true,\n dynamic: toolCall.dynamic,\n } as TypedToolError<TOOLS>;\n }\n\n return {\n type: 'tool-result' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName as keyof TOOLS & string,\n input: toolCall.input,\n output: part.result,\n providerExecuted: true,\n dynamic: toolCall.dynamic,\n } as TypedToolResult<TOOLS>;\n }\n }\n }),\n ...toolOutputs,\n ];\n}\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_NoOutputSpecifiedError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\n/**\nThrown when no output type is specified and output-related methods are called.\n */\nexport class NoOutputSpecifiedError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n constructor({ message = 'No output specified.' }: { message?: string } = {}) {\n super({ name, message });\n }\n\n static isInstance(error: unknown): error is NoOutputSpecifiedError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","export function asArray<T>(value: T | T[] | undefined): T[] {\n return value === undefined ? [] : Array.isArray(value) ? value : [value];\n}\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_InvalidArgumentError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\nexport class InvalidArgumentError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n readonly parameter: string;\n readonly value: unknown;\n\n constructor({\n parameter,\n value,\n message,\n }: {\n parameter: string;\n value: unknown;\n message: string;\n }) {\n super({\n name,\n message: `Invalid argument for parameter ${parameter}: ${message}`,\n });\n\n this.parameter = parameter;\n this.value = value;\n }\n\n static isInstance(error: unknown): error is InvalidArgumentError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { APICallError } from '@ai-sdk/provider';\nimport { delay, getErrorMessage, isAbortError } from '@ai-sdk/provider-utils';\nimport { RetryError } from './retry-error';\n\nexport type RetryFunction = <OUTPUT>(\n fn: () => PromiseLike<OUTPUT>,\n) => PromiseLike<OUTPUT>;\n\nfunction getRetryDelayInMs({\n error,\n exponentialBackoffDelay,\n}: {\n error: APICallError;\n exponentialBackoffDelay: number;\n}): number {\n const headers = error.responseHeaders;\n\n if (!headers) return exponentialBackoffDelay;\n\n let ms: number | undefined;\n\n // retry-ms is more precise than retry-after and used by e.g. 
OpenAI\n const retryAfterMs = headers['retry-after-ms'];\n if (retryAfterMs) {\n const timeoutMs = parseFloat(retryAfterMs);\n if (!Number.isNaN(timeoutMs)) {\n ms = timeoutMs;\n }\n }\n\n // About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After\n const retryAfter = headers['retry-after'];\n if (retryAfter && ms === undefined) {\n const timeoutSeconds = parseFloat(retryAfter);\n if (!Number.isNaN(timeoutSeconds)) {\n ms = timeoutSeconds * 1000;\n } else {\n ms = Date.parse(retryAfter) - Date.now();\n }\n }\n\n // check that the delay is reasonable:\n if (\n ms != null &&\n !Number.isNaN(ms) &&\n 0 <= ms &&\n (ms < 60 * 1000 || ms < exponentialBackoffDelay)\n ) {\n return ms;\n }\n\n return exponentialBackoffDelay;\n}\n\n/**\nThe `retryWithExponentialBackoffRespectingRetryHeaders` strategy retries a failed API call with an exponential backoff,\nwhile respecting rate limit headers (retry-after-ms and retry-after) if they are provided and reasonable (0-60 seconds).\nYou can configure the maximum number of retries, the initial delay, and the backoff factor.\n */\nexport const retryWithExponentialBackoffRespectingRetryHeaders =\n ({\n maxRetries = 2,\n initialDelayInMs = 2000,\n backoffFactor = 2,\n abortSignal,\n }: {\n maxRetries?: number;\n initialDelayInMs?: number;\n backoffFactor?: number;\n abortSignal?: AbortSignal;\n } = {}): RetryFunction =>\n async <OUTPUT>(f: () => PromiseLike<OUTPUT>) =>\n _retryWithExponentialBackoff(f, {\n maxRetries,\n delayInMs: initialDelayInMs,\n backoffFactor,\n abortSignal,\n });\n\nasync function _retryWithExponentialBackoff<OUTPUT>(\n f: () => PromiseLike<OUTPUT>,\n {\n maxRetries,\n delayInMs,\n backoffFactor,\n abortSignal,\n }: {\n maxRetries: number;\n delayInMs: number;\n backoffFactor: number;\n abortSignal: AbortSignal | undefined;\n },\n errors: unknown[] = [],\n): Promise<OUTPUT> {\n try {\n return await f();\n } catch (error) {\n if (isAbortError(error)) {\n throw error; // don't retry when the request was aborted\n }\n\n if (maxRetries === 0) {\n throw error; // don't wrap the error when retries are disabled\n }\n\n const errorMessage = getErrorMessage(error);\n const newErrors = [...errors, error];\n const tryNumber = newErrors.length;\n\n if (tryNumber > maxRetries) {\n throw new RetryError({\n message: `Failed after ${tryNumber} attempts. 
Last error: ${errorMessage}`,\n reason: 'maxRetriesExceeded',\n errors: newErrors,\n });\n }\n\n if (\n error instanceof Error &&\n APICallError.isInstance(error) &&\n error.isRetryable === true &&\n tryNumber <= maxRetries\n ) {\n await delay(\n getRetryDelayInMs({\n error,\n exponentialBackoffDelay: delayInMs,\n }),\n { abortSignal },\n );\n\n return _retryWithExponentialBackoff(\n f,\n {\n maxRetries,\n delayInMs: backoffFactor * delayInMs,\n backoffFactor,\n abortSignal,\n },\n newErrors,\n );\n }\n\n if (tryNumber === 1) {\n throw error; // don't wrap the error when a non-retryable error occurs on the first try\n }\n\n throw new RetryError({\n message: `Failed after ${tryNumber} attempts with non-retryable error: '${errorMessage}'`,\n reason: 'errorNotRetryable',\n errors: newErrors,\n });\n }\n}\n","import { AISDKError } from '@ai-sdk/provider';\n\nconst name = 'AI_RetryError';\nconst marker = `vercel.ai.error.${name}`;\nconst symbol = Symbol.for(marker);\n\nexport type RetryErrorReason =\n | 'maxRetriesExceeded'\n | 'errorNotRetryable'\n | 'abort';\n\nexport class RetryError extends AISDKError {\n private readonly [symbol] = true; // used in isInstance\n\n // note: property order determines debugging output\n readonly reason: RetryErrorReason;\n readonly lastError: unknown;\n readonly errors: Array<unknown>;\n\n constructor({\n message,\n reason,\n errors,\n }: {\n message: string;\n reason: RetryErrorReason;\n errors: Array<unknown>;\n }) {\n super({ name, message });\n\n this.reason = reason;\n this.errors = errors;\n\n // separate our last error to make debugging via log easier:\n this.lastError = errors[errors.length - 1];\n }\n\n static isInstance(error: unknown): error is RetryError {\n return AISDKError.hasMarker(error, marker);\n }\n}\n","import { InvalidArgumentError } from '../error/invalid-argument-error';\nimport {\n RetryFunction,\n retryWithExponentialBackoffRespectingRetryHeaders,\n} from '../util/retry-with-exponential-backoff';\n\n/**\n * Validate and prepare retries.\n */\nexport function prepareRetries({\n maxRetries,\n abortSignal,\n}: {\n maxRetries: number | undefined;\n abortSignal: AbortSignal | undefined;\n}): {\n maxRetries: number;\n retry: RetryFunction;\n} {\n if (maxRetries != null) {\n if (!Number.isInteger(maxRetries)) {\n throw new InvalidArgumentError({\n parameter: 'maxRetries',\n value: maxRetries,\n message: 'maxRetries must be an integer',\n });\n }\n\n if (maxRetries < 0) {\n throw new InvalidArgumentError({\n parameter: 'maxRetries',\n value: maxRetries,\n message: 'maxRetries must be >= 0',\n });\n }\n }\n\n const maxRetriesResult = maxRetries ?? 
2;\n\n return {\n maxRetries: maxRetriesResult,\n retry: retryWithExponentialBackoffRespectingRetryHeaders({\n maxRetries: maxRetriesResult,\n abortSignal,\n }),\n };\n}\n","import {\n LanguageModelV2FilePart,\n LanguageModelV2Message,\n LanguageModelV2Prompt,\n LanguageModelV2TextPart,\n} from '@ai-sdk/provider';\nimport {\n DataContent,\n FilePart,\n ImagePart,\n isUrlSupported,\n ModelMessage,\n TextPart,\n} from '@ai-sdk/provider-utils';\nimport {\n detectMediaType,\n imageMediaTypeSignatures,\n} from '../util/detect-media-type';\nimport { download } from '../util/download';\nimport { convertToLanguageModelV2DataContent } from './data-content';\nimport { InvalidMessageRoleError } from './invalid-message-role-error';\nimport { StandardizedPrompt } from './standardize-prompt';\n\nexport async function convertToLanguageModelPrompt({\n prompt,\n supportedUrls,\n downloadImplementation = download,\n}: {\n prompt: StandardizedPrompt;\n supportedUrls: Record<string, RegExp[]>;\n downloadImplementation?: typeof download;\n}): Promise<LanguageModelV2Prompt> {\n const downloadedAssets = await downloadAssets(\n prompt.messages,\n downloadImplementation,\n supportedUrls,\n );\n\n return [\n ...(prompt.system != null\n ? [{ role: 'system' as const, content: prompt.system }]\n : []),\n ...prompt.messages.map(message =>\n convertToLanguageModelMessage({ message, downloadedAssets }),\n ),\n ];\n}\n\n/**\n * Convert a ModelMessage to a LanguageModelV2Message.\n *\n * @param message The ModelMessage to convert.\n * @param downloadedAssets A map of URLs to their downloaded data. Only\n * available if the model does not support URLs, null otherwise.\n */\nexport function convertToLanguageModelMessage({\n message,\n downloadedAssets,\n}: {\n message: ModelMessage;\n downloadedAssets: Record<\n string,\n { mediaType: string | undefined; data: Uint8Array }\n >;\n}): LanguageModelV2Message {\n const role = message.role;\n switch (role) {\n case 'system': {\n return {\n role: 'system',\n content: message.content,\n providerOptions: message.providerOptions,\n };\n }\n\n case 'user': {\n if (typeof message.content === 'string') {\n return {\n role: 'user',\n content: [{ type: 'text', text: message.content }],\n providerOptions: message.providerOptions,\n };\n }\n\n return {\n role: 'user',\n content: message.content\n .map(part => convertPartToLanguageModelPart(part, downloadedAssets))\n // remove empty text parts:\n .filter(part => part.type !== 'text' || part.text !== ''),\n providerOptions: message.providerOptions,\n };\n }\n\n case 'assistant': {\n if (typeof message.content === 'string') {\n return {\n role: 'assistant',\n content: [{ type: 'text', text: message.content }],\n providerOptions: message.providerOptions,\n };\n }\n\n return {\n role: 'assistant',\n content: message.content\n .filter(\n // remove empty text parts:\n part => part.type !== 'text' || part.text !== '',\n )\n .map(part => {\n const providerOptions = part.providerOptions;\n\n switch (part.type) {\n case 'file': {\n const { data, mediaType } = convertToLanguageModelV2DataContent(\n part.data,\n );\n return {\n type: 'file',\n data,\n filename: part.filename,\n mediaType: mediaType ?? 
part.mediaType,\n providerOptions,\n };\n }\n case 'reasoning': {\n return {\n type: 'reasoning',\n text: part.text,\n providerOptions,\n };\n }\n case 'text': {\n return {\n type: 'text' as const,\n text: part.text,\n providerOptions,\n };\n }\n case 'tool-call': {\n return {\n type: 'tool-call' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName,\n input: part.input,\n providerExecuted: part.providerExecuted,\n providerOptions,\n };\n }\n case 'tool-result': {\n return {\n type: 'tool-result' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName,\n output: part.output,\n providerOptions,\n };\n }\n }\n }),\n providerOptions: message.providerOptions,\n };\n }\n\n case 'tool': {\n return {\n role: 'tool',\n content: message.content.map(part => ({\n type: 'tool-result' as const,\n toolCallId: part.toolCallId,\n toolName: part.toolName,\n output: part.output,\n providerOptions: part.providerOptions,\n })),\n providerOptions: message.providerOptions,\n };\n }\n\n default: {\n const _exhaustiveCheck: never = role;\n throw new InvalidMessageRoleError({ role: _exhaustiveCheck });\n }\n }\n}\n\n/**\n * Downloads images and files from URLs in the messages.\n */\nasync function downloadAssets(\n messages: ModelMessage[],\n downloadImplementation: typeof download,\n supportedUrls: Record<string, RegExp[]>,\n): Promise<\n Record<string, { mediaType: string | undefined; data: Uint8Array }>\n> {\n const urls = messages\n .filter(message => message.role === 'user')\n .map(message => message.content)\n .filter((content): content is Array<TextPart | ImagePart | FilePart> =>\n Array.isArray(content),\n )\n .flat()\n .filter(\n (part): part is ImagePart | FilePart =>\n part.type === 'image' || part.type === 'file',\n )\n .map(part => {\n const mediaType =\n part.mediaType ?? (part.type === 'image' ? 'image/*' : undefined);\n\n let data = part.type === 'image' ? part.image : part.data;\n if (typeof data === 'string') {\n try {\n data = new URL(data);\n } catch (ignored) {}\n }\n\n return { mediaType, data };\n })\n /**\n * Filter out URLs that the model supports natively, so we don't download them.\n */\n .filter(\n (part): part is { mediaType: string; data: URL } =>\n part.data instanceof URL &&\n part.mediaType != null &&\n !isUrlSupported({\n url: part.data.toString(),\n mediaType: part.mediaType,\n supportedUrls,\n }),\n )\n .map(part => part.data);\n\n // download in parallel:\n const downloadedImages = await Promise.all(\n urls.map(async url => ({\n url,\n data: await downloadImplementation({ url }),\n })),\n );\n\n return Object.fromEntries(\n downloadedImages.map(({ url, data }) => [url.toString(), data]),\n );\n}\n\n/**\n * Convert part of a message to a LanguageModelV2Part.\n * @param part The part to convert.\n * @param downloadedAssets A map of URLs to their downloaded data. Only\n * available if the model does not support URLs, null otherwise.\n *\n * @returns The converted part.\n */\n