@ai-sdk/anthropic
The **[Anthropic provider](https://ai-sdk.dev/providers/ai-sdk-providers/anthropic)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the [Anthropic Messages API](https://docs.anthropic.com/claude/reference/messages_post).
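As a minimal usage sketch (assuming the AI SDK's `generateText` helper from the `ai` package, which is not part of this provider), the default `anthropic` instance exported by this package can be called directly with a model id:

```ts
import { generateText } from 'ai'; // AI SDK core helper (assumed peer package)
import { anthropic } from '@ai-sdk/anthropic';

// The provider function returns a LanguageModelV2 for the given model id;
// 'claude-3-5-sonnet-latest' is one of the ids listed in the source below.
const { text } = await generateText({
  model: anthropic('claude-3-5-sonnet-latest'),
  prompt: 'Summarize the Anthropic Messages API in one sentence.',
});

console.log(text);
```

By default the API key is read from the `ANTHROPIC_API_KEY` environment variable and sent via the `x-api-key` header.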
1 line • 129 kB
Source Map (JSON)
{"version":3,"sources":["../src/anthropic-provider.ts","../src/anthropic-messages-language-model.ts","../src/anthropic-error.ts","../src/anthropic-messages-options.ts","../src/anthropic-prepare-tools.ts","../src/get-cache-control.ts","../src/tool/web-search_20250305.ts","../src/convert-to-anthropic-messages-prompt.ts","../src/map-anthropic-stop-reason.ts","../src/tool/bash_20241022.ts","../src/tool/bash_20250124.ts","../src/tool/computer_20241022.ts","../src/tool/computer_20250124.ts","../src/tool/text-editor_20241022.ts","../src/tool/text-editor_20250124.ts","../src/tool/text-editor_20250429.ts","../src/anthropic-tools.ts"],"sourcesContent":["import {\n LanguageModelV2,\n NoSuchModelError,\n ProviderV2,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n generateId,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { AnthropicMessagesLanguageModel } from './anthropic-messages-language-model';\nimport { AnthropicMessagesModelId } from './anthropic-messages-options';\nimport { anthropicTools } from './anthropic-tools';\n\nexport interface AnthropicProvider extends ProviderV2 {\n /**\nCreates a model for text generation.\n*/\n (modelId: AnthropicMessagesModelId): LanguageModelV2;\n\n /**\nCreates a model for text generation.\n*/\n languageModel(modelId: AnthropicMessagesModelId): LanguageModelV2;\n\n chat(modelId: AnthropicMessagesModelId): LanguageModelV2;\n\n messages(modelId: AnthropicMessagesModelId): LanguageModelV2;\n\n /**\nAnthropic-specific computer use tool.\n */\n tools: typeof anthropicTools;\n}\n\nexport interface AnthropicProviderSettings {\n /**\nUse a different URL prefix for API calls, e.g. to use proxy servers.\nThe default prefix is `https://api.anthropic.com/v1`.\n */\n baseURL?: string;\n\n /**\nAPI key that is being send using the `x-api-key` header.\nIt defaults to the `ANTHROPIC_API_KEY` environment variable.\n */\n apiKey?: string;\n\n /**\nCustom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n */\n fetch?: FetchFunction;\n\n generateId?: () => string;\n}\n\n/**\nCreate an Anthropic provider instance.\n */\nexport function createAnthropic(\n options: AnthropicProviderSettings = {},\n): AnthropicProvider {\n const baseURL =\n withoutTrailingSlash(options.baseURL) ?? 'https://api.anthropic.com/v1';\n\n const getHeaders = () => ({\n 'anthropic-version': '2023-06-01',\n 'x-api-key': loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'ANTHROPIC_API_KEY',\n description: 'Anthropic',\n }),\n ...options.headers,\n });\n\n const createChatModel = (modelId: AnthropicMessagesModelId) =>\n new AnthropicMessagesLanguageModel(modelId, {\n provider: 'anthropic.messages',\n baseURL,\n headers: getHeaders,\n fetch: options.fetch,\n generateId: options.generateId ?? 
generateId,\n supportedUrls: () => ({\n 'image/*': [/^https?:\\/\\/.*$/],\n }),\n });\n\n const provider = function (modelId: AnthropicMessagesModelId) {\n if (new.target) {\n throw new Error(\n 'The Anthropic model function cannot be called with the new keyword.',\n );\n }\n\n return createChatModel(modelId);\n };\n\n provider.languageModel = createChatModel;\n provider.chat = createChatModel;\n provider.messages = createChatModel;\n\n provider.textEmbeddingModel = (modelId: string) => {\n throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' });\n };\n provider.imageModel = (modelId: string) => {\n throw new NoSuchModelError({ modelId, modelType: 'imageModel' });\n };\n\n provider.tools = anthropicTools;\n\n return provider;\n}\n\n/**\nDefault Anthropic provider instance.\n */\nexport const anthropic = createAnthropic();\n","import {\n JSONObject,\n JSONValue,\n LanguageModelV2,\n LanguageModelV2CallWarning,\n LanguageModelV2Content,\n LanguageModelV2FinishReason,\n LanguageModelV2FunctionTool,\n LanguageModelV2Prompt,\n LanguageModelV2StreamPart,\n LanguageModelV2Usage,\n SharedV2ProviderMetadata,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n ParseResult,\n Resolvable,\n combineHeaders,\n createEventSourceResponseHandler,\n createJsonResponseHandler,\n generateId,\n parseProviderOptions,\n postJsonToApi,\n resolve,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { anthropicFailedResponseHandler } from './anthropic-error';\nimport {\n AnthropicMessagesModelId,\n anthropicProviderOptions,\n} from './anthropic-messages-options';\nimport { prepareTools } from './anthropic-prepare-tools';\nimport { convertToAnthropicMessagesPrompt } from './convert-to-anthropic-messages-prompt';\nimport { mapAnthropicStopReason } from './map-anthropic-stop-reason';\n\nconst citationSchemas = {\n webSearchResult: z.object({\n type: z.literal('web_search_result_location'),\n cited_text: z.string(),\n url: z.string(),\n title: z.string(),\n encrypted_index: z.string(),\n }),\n pageLocation: z.object({\n type: z.literal('page_location'),\n cited_text: z.string(),\n document_index: z.number(),\n document_title: z.string().nullable(),\n start_page_number: z.number(),\n end_page_number: z.number(),\n }),\n charLocation: z.object({\n type: z.literal('char_location'),\n cited_text: z.string(),\n document_index: z.number(),\n document_title: z.string().nullable(),\n start_char_index: z.number(),\n end_char_index: z.number(),\n }),\n};\n\nconst citationSchema = z.discriminatedUnion('type', [\n citationSchemas.webSearchResult,\n citationSchemas.pageLocation,\n citationSchemas.charLocation,\n]);\n\nconst documentCitationSchema = z.discriminatedUnion('type', [\n citationSchemas.pageLocation,\n citationSchemas.charLocation,\n]);\n\ntype Citation = z.infer<typeof citationSchema>;\nexport type DocumentCitation = z.infer<typeof documentCitationSchema>;\nexport type AnthropicProviderMetadata = SharedV2ProviderMetadata & {\n usage?: Record<string, JSONValue>;\n};\n\nfunction processCitation(\n citation: Citation,\n citationDocuments: Array<{\n title: string;\n filename?: string;\n mediaType: string;\n }>,\n generateId: () => string,\n onSource: (source: any) => void,\n) {\n if (citation.type === 'page_location' || citation.type === 'char_location') {\n const source = createCitationSource(\n citation,\n citationDocuments,\n generateId,\n );\n if (source) {\n onSource(source);\n }\n }\n}\n\nfunction createCitationSource(\n citation: 
DocumentCitation,\n citationDocuments: Array<{\n title: string;\n filename?: string;\n mediaType: string;\n }>,\n generateId: () => string,\n) {\n const documentInfo = citationDocuments[citation.document_index];\n if (!documentInfo) {\n return null;\n }\n\n const providerMetadata =\n citation.type === 'page_location'\n ? {\n citedText: citation.cited_text,\n startPageNumber: citation.start_page_number,\n endPageNumber: citation.end_page_number,\n }\n : {\n citedText: citation.cited_text,\n startCharIndex: citation.start_char_index,\n endCharIndex: citation.end_char_index,\n };\n\n return {\n type: 'source' as const,\n sourceType: 'document' as const,\n id: generateId(),\n mediaType: documentInfo.mediaType,\n title: citation.document_title ?? documentInfo.title,\n filename: documentInfo.filename,\n providerMetadata: {\n anthropic: providerMetadata,\n },\n };\n}\n\ntype AnthropicMessagesConfig = {\n provider: string;\n baseURL: string;\n headers: Resolvable<Record<string, string | undefined>>;\n fetch?: FetchFunction;\n buildRequestUrl?: (baseURL: string, isStreaming: boolean) => string;\n transformRequestBody?: (args: Record<string, any>) => Record<string, any>;\n supportedUrls?: () => LanguageModelV2['supportedUrls'];\n generateId?: () => string;\n};\n\nexport class AnthropicMessagesLanguageModel implements LanguageModelV2 {\n readonly specificationVersion = 'v2';\n\n readonly modelId: AnthropicMessagesModelId;\n\n private readonly config: AnthropicMessagesConfig;\n private readonly generateId: () => string;\n\n constructor(\n modelId: AnthropicMessagesModelId,\n config: AnthropicMessagesConfig,\n ) {\n this.modelId = modelId;\n this.config = config;\n this.generateId = config.generateId ?? generateId;\n }\n\n supportsUrl(url: URL): boolean {\n return url.protocol === 'https:';\n }\n\n get provider(): string {\n return this.config.provider;\n }\n\n get supportedUrls() {\n return this.config.supportedUrls?.() ?? {};\n }\n\n private async getArgs({\n prompt,\n maxOutputTokens = 4096, // 4096: max model output tokens TODO update default in v5\n temperature,\n topP,\n topK,\n frequencyPenalty,\n presencePenalty,\n stopSequences,\n responseFormat,\n seed,\n tools,\n toolChoice,\n providerOptions,\n }: Parameters<LanguageModelV2['doGenerate']>[0]) {\n const warnings: LanguageModelV2CallWarning[] = [];\n\n if (frequencyPenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'frequencyPenalty',\n });\n }\n\n if (presencePenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'presencePenalty',\n });\n }\n\n if (seed != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'seed',\n });\n }\n\n if (responseFormat?.type === 'json') {\n if (responseFormat.schema == null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'responseFormat',\n details:\n 'JSON response format requires a schema. ' +\n 'The response format is ignored.',\n });\n } else if (tools != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'tools',\n details:\n 'JSON response format does not support tools. ' +\n 'The provided tools are ignored.',\n });\n }\n }\n\n const jsonResponseTool: LanguageModelV2FunctionTool | undefined =\n responseFormat?.type === 'json' && responseFormat.schema != null\n ? 
{\n type: 'function',\n name: 'json',\n description: 'Respond with a JSON object.',\n inputSchema: responseFormat.schema,\n }\n : undefined;\n\n const anthropicOptions = await parseProviderOptions({\n provider: 'anthropic',\n providerOptions,\n schema: anthropicProviderOptions,\n });\n\n const { prompt: messagesPrompt, betas: messagesBetas } =\n await convertToAnthropicMessagesPrompt({\n prompt,\n sendReasoning: anthropicOptions?.sendReasoning ?? true,\n warnings,\n });\n\n const isThinking = anthropicOptions?.thinking?.type === 'enabled';\n const thinkingBudget = anthropicOptions?.thinking?.budgetTokens;\n\n const baseArgs = {\n // model id:\n model: this.modelId,\n\n // standardized settings:\n max_tokens: maxOutputTokens,\n temperature,\n top_k: topK,\n top_p: topP,\n stop_sequences: stopSequences,\n\n // provider specific settings:\n ...(isThinking && {\n thinking: { type: 'enabled', budget_tokens: thinkingBudget },\n }),\n\n // prompt:\n system: messagesPrompt.system,\n messages: messagesPrompt.messages,\n };\n\n if (isThinking) {\n if (thinkingBudget == null) {\n throw new UnsupportedFunctionalityError({\n functionality: 'thinking requires a budget',\n });\n }\n\n if (baseArgs.temperature != null) {\n baseArgs.temperature = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'temperature',\n details: 'temperature is not supported when thinking is enabled',\n });\n }\n\n if (topK != null) {\n baseArgs.top_k = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'topK',\n details: 'topK is not supported when thinking is enabled',\n });\n }\n\n if (topP != null) {\n baseArgs.top_p = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'topP',\n details: 'topP is not supported when thinking is enabled',\n });\n }\n\n // adjust max tokens to account for thinking:\n baseArgs.max_tokens = maxOutputTokens + thinkingBudget;\n }\n\n const {\n tools: anthropicTools,\n toolChoice: anthropicToolChoice,\n toolWarnings,\n betas: toolsBetas,\n } = prepareTools(\n jsonResponseTool != null\n ? {\n tools: [jsonResponseTool],\n toolChoice: { type: 'tool', toolName: jsonResponseTool.name },\n disableParallelToolUse: anthropicOptions?.disableParallelToolUse,\n }\n : {\n tools: tools ?? [],\n toolChoice,\n disableParallelToolUse: anthropicOptions?.disableParallelToolUse,\n },\n );\n\n return {\n args: {\n ...baseArgs,\n tools: anthropicTools,\n tool_choice: anthropicToolChoice,\n },\n warnings: [...warnings, ...toolWarnings],\n betas: new Set([...messagesBetas, ...toolsBetas]),\n usesJsonResponseTool: jsonResponseTool != null,\n };\n }\n\n private async getHeaders({\n betas,\n headers,\n }: {\n betas: Set<string>;\n headers: Record<string, string | undefined> | undefined;\n }) {\n return combineHeaders(\n await resolve(this.config.headers),\n betas.size > 0 ? { 'anthropic-beta': Array.from(betas).join(',') } : {},\n headers,\n );\n }\n\n private buildRequestUrl(isStreaming: boolean): string {\n return (\n this.config.buildRequestUrl?.(this.config.baseURL, isStreaming) ??\n `${this.config.baseURL}/messages`\n );\n }\n\n private transformRequestBody(args: Record<string, any>): Record<string, any> {\n return this.config.transformRequestBody?.(args) ?? 
args;\n }\n\n private extractCitationDocuments(prompt: LanguageModelV2Prompt): Array<{\n title: string;\n filename?: string;\n mediaType: string;\n }> {\n const isCitationPart = (part: {\n type: string;\n mediaType?: string;\n providerOptions?: { anthropic?: { citations?: { enabled?: boolean } } };\n }) => {\n if (part.type !== 'file') {\n return false;\n }\n\n if (\n part.mediaType !== 'application/pdf' &&\n part.mediaType !== 'text/plain'\n ) {\n return false;\n }\n\n const anthropic = part.providerOptions?.anthropic;\n const citationsConfig = anthropic?.citations as\n | { enabled?: boolean }\n | undefined;\n return citationsConfig?.enabled ?? false;\n };\n\n return prompt\n .filter(message => message.role === 'user')\n .flatMap(message => message.content)\n .filter(isCitationPart)\n .map(part => {\n // TypeScript knows this is a file part due to our filter\n const filePart = part as Extract<typeof part, { type: 'file' }>;\n return {\n title: filePart.filename ?? 'Untitled Document',\n filename: filePart.filename,\n mediaType: filePart.mediaType,\n };\n });\n }\n\n async doGenerate(\n options: Parameters<LanguageModelV2['doGenerate']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {\n const { args, warnings, betas, usesJsonResponseTool } =\n await this.getArgs(options);\n\n // Extract citation documents for response processing\n const citationDocuments = this.extractCitationDocuments(options.prompt);\n\n const {\n responseHeaders,\n value: response,\n rawValue: rawResponse,\n } = await postJsonToApi({\n url: this.buildRequestUrl(false),\n headers: await this.getHeaders({ betas, headers: options.headers }),\n body: this.transformRequestBody(args),\n failedResponseHandler: anthropicFailedResponseHandler,\n successfulResponseHandler: createJsonResponseHandler(\n anthropicMessagesResponseSchema,\n ),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n const content: Array<LanguageModelV2Content> = [];\n\n // map response content to content array\n for (const part of response.content) {\n switch (part.type) {\n case 'text': {\n // when a json response tool is used, the tool call is returned as text,\n // so we ignore the text content:\n if (!usesJsonResponseTool) {\n content.push({ type: 'text', text: part.text });\n\n // Process citations if present\n if (part.citations) {\n for (const citation of part.citations) {\n processCitation(\n citation,\n citationDocuments,\n this.generateId,\n source => content.push(source),\n );\n }\n }\n }\n break;\n }\n case 'thinking': {\n content.push({\n type: 'reasoning',\n text: part.thinking,\n providerMetadata: {\n anthropic: {\n signature: part.signature,\n } satisfies AnthropicReasoningMetadata,\n },\n });\n break;\n }\n case 'redacted_thinking': {\n content.push({\n type: 'reasoning',\n text: '',\n providerMetadata: {\n anthropic: {\n redactedData: part.data,\n } satisfies AnthropicReasoningMetadata,\n },\n });\n break;\n }\n case 'tool_use': {\n content.push(\n // when a json response tool is used, the tool call becomes the text:\n usesJsonResponseTool\n ? 
{\n type: 'text',\n text: JSON.stringify(part.input),\n }\n : {\n type: 'tool-call',\n toolCallId: part.id,\n toolName: part.name,\n input: JSON.stringify(part.input),\n },\n );\n\n break;\n }\n case 'server_tool_use': {\n if (part.name === 'web_search') {\n content.push({\n type: 'tool-call',\n toolCallId: part.id,\n toolName: part.name,\n input: JSON.stringify(part.input),\n providerExecuted: true,\n });\n }\n\n break;\n }\n case 'web_search_tool_result': {\n if (Array.isArray(part.content)) {\n content.push({\n type: 'tool-result',\n toolCallId: part.tool_use_id,\n toolName: 'web_search',\n result: part.content.map(result => ({\n url: result.url,\n title: result.title,\n pageAge: result.page_age ?? null,\n encryptedContent: result.encrypted_content,\n type: result.type,\n })),\n providerExecuted: true,\n });\n\n for (const result of part.content) {\n content.push({\n type: 'source',\n sourceType: 'url',\n id: this.generateId(),\n url: result.url,\n title: result.title,\n providerMetadata: {\n anthropic: {\n pageAge: result.page_age ?? null,\n },\n },\n });\n }\n } else {\n content.push({\n type: 'tool-result',\n toolCallId: part.tool_use_id,\n toolName: 'web_search',\n isError: true,\n result: {\n type: 'web_search_tool_result_error',\n errorCode: part.content.error_code,\n },\n providerExecuted: true,\n });\n }\n break;\n }\n }\n }\n\n return {\n content,\n finishReason: mapAnthropicStopReason({\n finishReason: response.stop_reason,\n isJsonResponseFromTool: usesJsonResponseTool,\n }),\n usage: {\n inputTokens: response.usage.input_tokens,\n outputTokens: response.usage.output_tokens,\n totalTokens: response.usage.input_tokens + response.usage.output_tokens,\n cachedInputTokens: response.usage.cache_read_input_tokens ?? undefined,\n },\n request: { body: args },\n response: {\n id: response.id ?? undefined,\n modelId: response.model ?? undefined,\n headers: responseHeaders,\n body: rawResponse,\n },\n warnings,\n providerMetadata: {\n anthropic: {\n usage: response.usage as JSONObject,\n cacheCreationInputTokens:\n response.usage.cache_creation_input_tokens ?? 
null,\n },\n },\n };\n }\n\n async doStream(\n options: Parameters<LanguageModelV2['doStream']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {\n const { args, warnings, betas, usesJsonResponseTool } =\n await this.getArgs(options);\n\n // Extract citation documents for response processing\n const citationDocuments = this.extractCitationDocuments(options.prompt);\n\n const body = { ...args, stream: true };\n\n const { responseHeaders, value: response } = await postJsonToApi({\n url: this.buildRequestUrl(true),\n headers: await this.getHeaders({ betas, headers: options.headers }),\n body: this.transformRequestBody(body),\n failedResponseHandler: anthropicFailedResponseHandler,\n successfulResponseHandler: createEventSourceResponseHandler(\n anthropicMessagesChunkSchema,\n ),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n let finishReason: LanguageModelV2FinishReason = 'unknown';\n const usage: LanguageModelV2Usage = {\n inputTokens: undefined,\n outputTokens: undefined,\n totalTokens: undefined,\n };\n\n const contentBlocks: Record<\n number,\n | {\n type: 'tool-call';\n toolCallId: string;\n toolName: string;\n input: string;\n providerExecuted?: boolean;\n }\n | { type: 'text' | 'reasoning' }\n > = {};\n\n let providerMetadata: AnthropicProviderMetadata | undefined = undefined;\n\n let blockType:\n | 'text'\n | 'thinking'\n | 'tool_use'\n | 'redacted_thinking'\n | 'server_tool_use'\n | 'web_search_tool_result'\n | undefined = undefined;\n\n const generateId = this.generateId;\n\n return {\n stream: response.pipeThrough(\n new TransformStream<\n ParseResult<z.infer<typeof anthropicMessagesChunkSchema>>,\n LanguageModelV2StreamPart\n >({\n start(controller) {\n controller.enqueue({ type: 'stream-start', warnings });\n },\n\n transform(chunk, controller) {\n if (options.includeRawChunks) {\n controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });\n }\n\n if (!chunk.success) {\n controller.enqueue({ type: 'error', error: chunk.error });\n return;\n }\n\n const value = chunk.value;\n\n switch (value.type) {\n case 'ping': {\n return; // ignored\n }\n\n case 'content_block_start': {\n const contentBlockType = value.content_block.type;\n\n blockType = contentBlockType;\n\n switch (contentBlockType) {\n case 'text': {\n contentBlocks[value.index] = { type: 'text' };\n controller.enqueue({\n type: 'text-start',\n id: String(value.index),\n });\n return;\n }\n\n case 'thinking': {\n contentBlocks[value.index] = { type: 'reasoning' };\n controller.enqueue({\n type: 'reasoning-start',\n id: String(value.index),\n });\n return;\n }\n\n case 'redacted_thinking': {\n contentBlocks[value.index] = { type: 'reasoning' };\n controller.enqueue({\n type: 'reasoning-start',\n id: String(value.index),\n providerMetadata: {\n anthropic: {\n redactedData: value.content_block.data,\n } satisfies AnthropicReasoningMetadata,\n },\n });\n return;\n }\n\n case 'tool_use': {\n contentBlocks[value.index] = usesJsonResponseTool\n ? { type: 'text' }\n : {\n type: 'tool-call',\n toolCallId: value.content_block.id,\n toolName: value.content_block.name,\n input: '',\n };\n\n controller.enqueue(\n usesJsonResponseTool\n ? 
{ type: 'text-start', id: String(value.index) }\n : {\n type: 'tool-input-start',\n id: value.content_block.id,\n toolName: value.content_block.name,\n },\n );\n return;\n }\n\n case 'server_tool_use': {\n if (value.content_block.name === 'web_search') {\n contentBlocks[value.index] = {\n type: 'tool-call',\n toolCallId: value.content_block.id,\n toolName: value.content_block.name,\n input: '',\n providerExecuted: true,\n };\n controller.enqueue({\n type: 'tool-input-start',\n id: value.content_block.id,\n toolName: value.content_block.name,\n providerExecuted: true,\n });\n }\n\n return;\n }\n\n case 'web_search_tool_result': {\n const part = value.content_block;\n\n if (Array.isArray(part.content)) {\n controller.enqueue({\n type: 'tool-result',\n toolCallId: part.tool_use_id,\n toolName: 'web_search',\n result: part.content.map(result => ({\n url: result.url,\n title: result.title,\n pageAge: result.page_age ?? null,\n encryptedContent: result.encrypted_content,\n type: result.type,\n })),\n providerExecuted: true,\n });\n\n for (const result of part.content) {\n controller.enqueue({\n type: 'source',\n sourceType: 'url',\n id: generateId(),\n url: result.url,\n title: result.title,\n providerMetadata: {\n anthropic: {\n pageAge: result.page_age ?? null,\n },\n },\n });\n }\n } else {\n controller.enqueue({\n type: 'tool-result',\n toolCallId: part.tool_use_id,\n toolName: 'web_search',\n isError: true,\n result: {\n type: 'web_search_tool_result_error',\n errorCode: part.content.error_code,\n },\n providerExecuted: true,\n });\n }\n return;\n }\n\n default: {\n const _exhaustiveCheck: never = contentBlockType;\n throw new Error(\n `Unsupported content block type: ${_exhaustiveCheck}`,\n );\n }\n }\n }\n\n case 'content_block_stop': {\n // when finishing a tool call block, send the full tool call:\n if (contentBlocks[value.index] != null) {\n const contentBlock = contentBlocks[value.index];\n\n switch (contentBlock.type) {\n case 'text': {\n controller.enqueue({\n type: 'text-end',\n id: String(value.index),\n });\n break;\n }\n\n case 'reasoning': {\n controller.enqueue({\n type: 'reasoning-end',\n id: String(value.index),\n });\n break;\n }\n\n case 'tool-call':\n // when a json response tool is used, the tool call is returned as text,\n // so we ignore the tool call content:\n if (!usesJsonResponseTool) {\n controller.enqueue({\n type: 'tool-input-end',\n id: contentBlock.toolCallId,\n });\n controller.enqueue(contentBlock);\n }\n break;\n }\n\n delete contentBlocks[value.index];\n }\n\n blockType = undefined; // reset block type\n\n return;\n }\n\n case 'content_block_delta': {\n const deltaType = value.delta.type;\n switch (deltaType) {\n case 'text_delta': {\n // when a json response tool is used, the tool call is returned as text,\n // so we ignore the text content:\n if (usesJsonResponseTool) {\n return;\n }\n\n controller.enqueue({\n type: 'text-delta',\n id: String(value.index),\n delta: value.delta.text,\n });\n\n return;\n }\n\n case 'thinking_delta': {\n controller.enqueue({\n type: 'reasoning-delta',\n id: String(value.index),\n delta: value.delta.thinking,\n });\n\n return;\n }\n\n case 'signature_delta': {\n // signature are only supported on thinking blocks:\n if (blockType === 'thinking') {\n controller.enqueue({\n type: 'reasoning-delta',\n id: String(value.index),\n delta: '',\n providerMetadata: {\n anthropic: {\n signature: value.delta.signature,\n } satisfies AnthropicReasoningMetadata,\n },\n });\n }\n\n return;\n }\n\n case 'input_json_delta': {\n const 
contentBlock = contentBlocks[value.index];\n const delta = value.delta.partial_json;\n\n if (usesJsonResponseTool) {\n if (contentBlock?.type !== 'text') {\n return;\n }\n\n controller.enqueue({\n type: 'text-delta',\n id: String(value.index),\n delta,\n });\n } else {\n if (contentBlock?.type !== 'tool-call') {\n return;\n }\n\n controller.enqueue({\n type: 'tool-input-delta',\n id: contentBlock.toolCallId,\n delta,\n });\n\n contentBlock.input += delta;\n }\n\n return;\n }\n\n case 'citations_delta': {\n const citation = value.delta.citation;\n\n processCitation(\n citation,\n citationDocuments,\n generateId,\n source => controller.enqueue(source),\n );\n // Web search citations are handled in web_search_tool_result content block\n return;\n }\n\n default: {\n const _exhaustiveCheck: never = deltaType;\n throw new Error(\n `Unsupported delta type: ${_exhaustiveCheck}`,\n );\n }\n }\n }\n\n case 'message_start': {\n usage.inputTokens = value.message.usage.input_tokens;\n usage.cachedInputTokens =\n value.message.usage.cache_read_input_tokens ?? undefined;\n\n providerMetadata = {\n anthropic: {\n usage: value.message.usage as JSONObject,\n cacheCreationInputTokens:\n value.message.usage.cache_creation_input_tokens ?? null,\n },\n };\n\n controller.enqueue({\n type: 'response-metadata',\n id: value.message.id ?? undefined,\n modelId: value.message.model ?? undefined,\n });\n\n return;\n }\n\n case 'message_delta': {\n usage.outputTokens = value.usage.output_tokens;\n usage.totalTokens =\n (usage.inputTokens ?? 0) + (value.usage.output_tokens ?? 0);\n\n finishReason = mapAnthropicStopReason({\n finishReason: value.delta.stop_reason,\n isJsonResponseFromTool: usesJsonResponseTool,\n });\n return;\n }\n\n case 'message_stop': {\n controller.enqueue({\n type: 'finish',\n finishReason,\n usage,\n providerMetadata,\n });\n return;\n }\n\n case 'error': {\n controller.enqueue({ type: 'error', error: value.error });\n return;\n }\n\n default: {\n const _exhaustiveCheck: never = value;\n throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);\n }\n }\n },\n }),\n ),\n request: { body },\n response: { headers: responseHeaders },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst anthropicMessagesResponseSchema = z.object({\n type: z.literal('message'),\n id: z.string().nullish(),\n model: z.string().nullish(),\n content: z.array(\n z.discriminatedUnion('type', [\n z.object({\n type: z.literal('text'),\n text: z.string(),\n citations: z.array(citationSchema).optional(),\n }),\n z.object({\n type: z.literal('thinking'),\n thinking: z.string(),\n signature: z.string(),\n }),\n z.object({\n type: z.literal('redacted_thinking'),\n data: z.string(),\n }),\n z.object({\n type: z.literal('tool_use'),\n id: z.string(),\n name: z.string(),\n input: z.unknown(),\n }),\n z.object({\n type: z.literal('server_tool_use'),\n id: z.string(),\n name: z.string(),\n input: z.record(z.string(), z.unknown()).nullish(),\n }),\n z.object({\n type: z.literal('web_search_tool_result'),\n tool_use_id: z.string(),\n content: z.union([\n z.array(\n z.object({\n type: z.literal('web_search_result'),\n url: z.string(),\n title: z.string(),\n encrypted_content: z.string(),\n page_age: z.string().nullish(),\n }),\n ),\n z.object({\n type: z.literal('web_search_tool_result_error'),\n error_code: z.string(),\n }),\n ]),\n }),\n ]),\n ),\n stop_reason: z.string().nullish(),\n usage: 
z.looseObject({\n input_tokens: z.number(),\n output_tokens: z.number(),\n cache_creation_input_tokens: z.number().nullish(),\n cache_read_input_tokens: z.number().nullish(),\n }),\n});\n\n// limited version of the schema, focused on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst anthropicMessagesChunkSchema = z.discriminatedUnion('type', [\n z.object({\n type: z.literal('message_start'),\n message: z.object({\n id: z.string().nullish(),\n model: z.string().nullish(),\n usage: z.looseObject({\n input_tokens: z.number(),\n output_tokens: z.number(),\n cache_creation_input_tokens: z.number().nullish(),\n cache_read_input_tokens: z.number().nullish(),\n }),\n }),\n }),\n z.object({\n type: z.literal('content_block_start'),\n index: z.number(),\n content_block: z.discriminatedUnion('type', [\n z.object({\n type: z.literal('text'),\n text: z.string(),\n }),\n z.object({\n type: z.literal('thinking'),\n thinking: z.string(),\n }),\n z.object({\n type: z.literal('tool_use'),\n id: z.string(),\n name: z.string(),\n }),\n z.object({\n type: z.literal('redacted_thinking'),\n data: z.string(),\n }),\n z.object({\n type: z.literal('server_tool_use'),\n id: z.string(),\n name: z.string(),\n input: z.record(z.string(), z.unknown()).nullish(),\n }),\n z.object({\n type: z.literal('web_search_tool_result'),\n tool_use_id: z.string(),\n content: z.union([\n z.array(\n z.object({\n type: z.literal('web_search_result'),\n url: z.string(),\n title: z.string(),\n encrypted_content: z.string(),\n page_age: z.string().nullish(),\n }),\n ),\n z.object({\n type: z.literal('web_search_tool_result_error'),\n error_code: z.string(),\n }),\n ]),\n }),\n ]),\n }),\n z.object({\n type: z.literal('content_block_delta'),\n index: z.number(),\n delta: z.discriminatedUnion('type', [\n z.object({\n type: z.literal('input_json_delta'),\n partial_json: z.string(),\n }),\n z.object({\n type: z.literal('text_delta'),\n text: z.string(),\n }),\n z.object({\n type: z.literal('thinking_delta'),\n thinking: z.string(),\n }),\n z.object({\n type: z.literal('signature_delta'),\n signature: z.string(),\n }),\n z.object({\n type: z.literal('citations_delta'),\n citation: citationSchema,\n }),\n ]),\n }),\n z.object({\n type: z.literal('content_block_stop'),\n index: z.number(),\n }),\n z.object({\n type: z.literal('error'),\n error: z.object({\n type: z.string(),\n message: z.string(),\n }),\n }),\n z.object({\n type: z.literal('message_delta'),\n delta: z.object({ stop_reason: z.string().nullish() }),\n usage: z.object({ output_tokens: z.number() }),\n }),\n z.object({\n type: z.literal('message_stop'),\n }),\n z.object({\n type: z.literal('ping'),\n }),\n]);\n\nexport const anthropicReasoningMetadataSchema = z.object({\n signature: z.string().optional(),\n redactedData: z.string().optional(),\n});\n\nexport type AnthropicReasoningMetadata = z.infer<\n typeof anthropicReasoningMetadataSchema\n>;\n","import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\n\nexport const anthropicErrorDataSchema = z.object({\n type: z.literal('error'),\n error: z.object({\n type: z.string(),\n message: z.string(),\n }),\n});\n\nexport type AnthropicErrorData = z.infer<typeof anthropicErrorDataSchema>;\n\nexport const anthropicFailedResponseHandler = createJsonErrorResponseHandler({\n errorSchema: anthropicErrorDataSchema,\n errorToMessage: data => data.error.message,\n});\n","import { z } from 'zod/v4';\n\n// 
https://docs.anthropic.com/claude/docs/models-overview\nexport type AnthropicMessagesModelId =\n | 'claude-opus-4-20250514'\n | 'claude-sonnet-4-20250514'\n | 'claude-3-7-sonnet-20250219'\n | 'claude-3-5-sonnet-latest'\n | 'claude-3-5-sonnet-20241022'\n | 'claude-3-5-sonnet-20240620'\n | 'claude-3-5-haiku-latest'\n | 'claude-3-5-haiku-20241022'\n | 'claude-3-opus-latest'\n | 'claude-3-opus-20240229'\n | 'claude-3-sonnet-20240229'\n | 'claude-3-haiku-20240307'\n | (string & {});\n\n/**\n * Anthropic file part provider options for document-specific features.\n * These options apply to individual file parts (documents).\n */\nexport const anthropicFilePartProviderOptions = z.object({\n /**\n * Citation configuration for this document.\n * When enabled, this document will generate citations in the response.\n */\n citations: z\n .object({\n /**\n * Enable citations for this document\n */\n enabled: z.boolean(),\n })\n .optional(),\n\n /**\n * Custom title for the document.\n * If not provided, the filename will be used.\n */\n title: z.string().optional(),\n\n /**\n * Context about the document that will be passed to the model\n * but not used towards cited content.\n * Useful for storing document metadata as text or stringified JSON.\n */\n context: z.string().optional(),\n});\n\nexport type AnthropicFilePartProviderOptions = z.infer<\n typeof anthropicFilePartProviderOptions\n>;\n\nexport const anthropicProviderOptions = z.object({\n sendReasoning: z.boolean().optional(),\n\n thinking: z\n .object({\n type: z.union([z.literal('enabled'), z.literal('disabled')]),\n budgetTokens: z.number().optional(),\n })\n .optional(),\n\n /**\n * Whether to disable parallel function calling during tool use. Default is false.\n * When set to true, Claude will use at most one tool per response.\n */\n disableParallelToolUse: z.boolean().optional(),\n});\n\nexport type AnthropicProviderOptions = z.infer<typeof anthropicProviderOptions>;\n","import {\n LanguageModelV2CallOptions,\n LanguageModelV2CallWarning,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { AnthropicTool, AnthropicToolChoice } from './anthropic-api-types';\nimport { getCacheControl } from './get-cache-control';\nimport { webSearch_20250305ArgsSchema } from './tool/web-search_20250305';\n\nfunction isWebSearchTool(\n tool: unknown,\n): tool is Extract<AnthropicTool, { type: 'web_search_20250305' }> {\n return (\n typeof tool === 'object' &&\n tool !== null &&\n 'type' in tool &&\n tool.type === 'web_search_20250305'\n );\n}\n\nexport function prepareTools({\n tools,\n toolChoice,\n disableParallelToolUse,\n}: {\n tools: LanguageModelV2CallOptions['tools'];\n toolChoice?: LanguageModelV2CallOptions['toolChoice'];\n disableParallelToolUse?: boolean;\n}): {\n tools: Array<AnthropicTool> | undefined;\n toolChoice: AnthropicToolChoice | undefined;\n toolWarnings: LanguageModelV2CallWarning[];\n betas: Set<string>;\n} {\n // when the tools array is empty, change it to undefined to prevent errors:\n tools = tools?.length ? 
tools : undefined;\n\n const toolWarnings: LanguageModelV2CallWarning[] = [];\n const betas = new Set<string>();\n\n if (tools == null) {\n return { tools: undefined, toolChoice: undefined, toolWarnings, betas };\n }\n\n const anthropicTools: AnthropicTool[] = [];\n\n for (const tool of tools) {\n // handle direct web search tool objects passed from provider options\n if (isWebSearchTool(tool)) {\n anthropicTools.push(tool);\n continue;\n }\n\n switch (tool.type) {\n case 'function':\n const cacheControl = getCacheControl(tool.providerOptions);\n\n anthropicTools.push({\n name: tool.name,\n description: tool.description,\n input_schema: tool.inputSchema,\n cache_control: cacheControl,\n });\n break;\n case 'provider-defined':\n switch (tool.id) {\n case 'anthropic.computer_20250124':\n betas.add('computer-use-2025-01-24');\n anthropicTools.push({\n name: 'computer',\n type: 'computer_20250124',\n display_width_px: tool.args.displayWidthPx as number,\n display_height_px: tool.args.displayHeightPx as number,\n display_number: tool.args.displayNumber as number,\n });\n break;\n case 'anthropic.computer_20241022':\n betas.add('computer-use-2024-10-22');\n anthropicTools.push({\n name: 'computer',\n type: 'computer_20241022',\n display_width_px: tool.args.displayWidthPx as number,\n display_height_px: tool.args.displayHeightPx as number,\n display_number: tool.args.displayNumber as number,\n });\n break;\n case 'anthropic.text_editor_20250124':\n betas.add('computer-use-2025-01-24');\n anthropicTools.push({\n name: 'str_replace_editor',\n type: 'text_editor_20250124',\n });\n break;\n case 'anthropic.text_editor_20241022':\n betas.add('computer-use-2024-10-22');\n anthropicTools.push({\n name: 'str_replace_editor',\n type: 'text_editor_20241022',\n });\n break;\n case 'anthropic.text_editor_20250429':\n betas.add('computer-use-2025-01-24');\n anthropicTools.push({\n name: 'str_replace_based_edit_tool',\n type: 'text_editor_20250429',\n });\n break;\n case 'anthropic.bash_20250124':\n betas.add('computer-use-2025-01-24');\n anthropicTools.push({\n name: 'bash',\n type: 'bash_20250124',\n });\n break;\n case 'anthropic.bash_20241022':\n betas.add('computer-use-2024-10-22');\n anthropicTools.push({\n name: 'bash',\n type: 'bash_20241022',\n });\n break;\n case 'anthropic.web_search_20250305': {\n const args = webSearch_20250305ArgsSchema.parse(tool.args);\n anthropicTools.push({\n type: 'web_search_20250305',\n name: 'web_search',\n max_uses: args.maxUses,\n allowed_domains: args.allowedDomains,\n blocked_domains: args.blockedDomains,\n user_location: args.userLocation,\n });\n break;\n }\n default:\n toolWarnings.push({ type: 'unsupported-tool', tool });\n break;\n }\n break;\n default:\n toolWarnings.push({ type: 'unsupported-tool', tool });\n break;\n }\n }\n\n if (toolChoice == null) {\n return {\n tools: anthropicTools,\n toolChoice: disableParallelToolUse\n ? { type: 'auto', disable_parallel_tool_use: disableParallelToolUse }\n : undefined,\n toolWarnings,\n betas,\n };\n }\n\n const type = toolChoice.type;\n\n switch (type) {\n case 'auto':\n return {\n tools: anthropicTools,\n toolChoice: {\n type: 'auto',\n disable_parallel_tool_use: disableParallelToolUse,\n },\n toolWarnings,\n
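The embedded source above also exports `createAnthropic` together with the `AnthropicProviderSettings` options (`baseURL`, `apiKey`, `headers`, `fetch`, `generateId`). A hedged sketch of configuring a custom provider instance, using a hypothetical proxy URL and header:

```ts
import { createAnthropic } from '@ai-sdk/anthropic';

// Option names come from AnthropicProviderSettings in the source map above.
// The baseURL and extra header are illustrative placeholders, not real endpoints.
const anthropicViaProxy = createAnthropic({
  baseURL: 'https://my-proxy.example.com/v1', // hypothetical proxy in front of api.anthropic.com
  apiKey: process.env.ANTHROPIC_API_KEY,      // defaults to this env var when omitted
  headers: { 'x-request-source': 'docs-example' }, // custom header merged into each request
});

// languageModel, chat, and messages are aliases for the same chat model factory.
const model = anthropicViaProxy.languageModel('claude-3-5-haiku-latest');
```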