@ai-sdk/anthropic

The **[Anthropic provider](https://sdk.vercel.ai/providers/ai-sdk-providers/anthropic)** for the [AI SDK](https://sdk.vercel.ai/docs) contains language model support for the [Anthropic Messages API](https://docs.anthropic.com/claude/reference/messages_post).
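
Since the file shown on this page is a build artifact rather than documentation, here is a brief usage sketch of the provider the package ships. It assumes the AI SDK's `generateText` helper from the `ai` package; the model ID, proxy URL, and environment variable name are illustrative, not prescribed by this package.

```ts
import { anthropic, createAnthropic } from '@ai-sdk/anthropic';
import { generateText } from 'ai';

// The default `anthropic` instance reads the API key from ANTHROPIC_API_KEY.
const { text } = await generateText({
  // Model ID is an assumption for illustration; pass any Anthropic Messages model ID.
  model: anthropic('claude-3-5-sonnet-20241022'),
  prompt: 'Write a haiku about source maps.',
});

// A custom instance, e.g. to route through a proxy or pass an explicit key
// (the baseURL and env var name here are hypothetical).
const proxied = createAnthropic({
  baseURL: 'https://my-proxy.example.com/v1',
  apiKey: process.env.MY_ANTHROPIC_KEY,
});
const model = proxied('claude-3-5-sonnet-20241022');
```

Both `anthropic` (the default provider instance) and `createAnthropic` are exported by the source embedded in the file below.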

The file below is the package's published source map: a single line of roughly 85.1 kB whose `sourcesContent` array embeds the original TypeScript sources listed under `sources`.
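
Because `sourcesContent` carries the full original files, the readable TypeScript can be recovered directly from the JSON. A minimal sketch, assuming Node.js and a locally saved copy of the map (the filename `index.js.map` and the `recovered` output directory are assumptions):

```ts
// Recover the embedded TypeScript sources from a version-3 source map.
import { mkdirSync, readFileSync, writeFileSync } from 'node:fs';
import { dirname, join } from 'node:path';

const map = JSON.parse(readFileSync('index.js.map', 'utf8')) as {
  version: number;
  sources: string[];
  sourcesContent?: (string | null)[];
};

map.sources.forEach((source, i) => {
  const content = map.sourcesContent?.[i];
  if (content == null) return; // sourcesContent entries are optional in the spec
  // Strip leading "../" segments so files land inside the output directory.
  const outPath = join('recovered', source.replace(/^(\.\.\/)+/, ''));
  mkdirSync(dirname(outPath), { recursive: true });
  writeFileSync(outPath, content);
});
```
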
{"version":3,"sources":["../src/anthropic-provider.ts","../src/anthropic-messages-language-model.ts","../src/anthropic-error.ts","../src/anthropic-prepare-tools.ts","../src/convert-to-anthropic-messages-prompt.ts","../src/map-anthropic-stop-reason.ts","../src/anthropic-tools.ts"],"sourcesContent":["import {\n LanguageModelV1,\n NoSuchModelError,\n ProviderV1,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { AnthropicMessagesLanguageModel } from './anthropic-messages-language-model';\nimport {\n AnthropicMessagesModelId,\n AnthropicMessagesSettings,\n} from './anthropic-messages-settings';\nimport { anthropicTools } from './anthropic-tools';\n\nexport interface AnthropicProvider extends ProviderV1 {\n /**\nCreates a model for text generation.\n*/\n (\n modelId: AnthropicMessagesModelId,\n settings?: AnthropicMessagesSettings,\n ): LanguageModelV1;\n\n /**\nCreates a model for text generation.\n*/\n languageModel(\n modelId: AnthropicMessagesModelId,\n settings?: AnthropicMessagesSettings,\n ): LanguageModelV1;\n\n /**\n@deprecated Use `.languageModel()` instead.\n*/\n chat(\n modelId: AnthropicMessagesModelId,\n settings?: AnthropicMessagesSettings,\n ): LanguageModelV1;\n\n /**\n@deprecated Use `.languageModel()` instead.\n */\n messages(\n modelId: AnthropicMessagesModelId,\n settings?: AnthropicMessagesSettings,\n ): LanguageModelV1;\n\n /**\nAnthropic-specific computer use tool.\n */\n tools: typeof anthropicTools;\n}\n\nexport interface AnthropicProviderSettings {\n /**\nUse a different URL prefix for API calls, e.g. to use proxy servers.\nThe default prefix is `https://api.anthropic.com/v1`.\n */\n baseURL?: string;\n\n /**\nAPI key that is being send using the `x-api-key` header.\nIt defaults to the `ANTHROPIC_API_KEY` environment variable.\n */\n apiKey?: string;\n\n /**\nCustom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n */\n fetch?: FetchFunction;\n\n generateId?: () => string;\n}\n\n/**\nCreate an Anthropic provider instance.\n */\nexport function createAnthropic(\n options: AnthropicProviderSettings = {},\n): AnthropicProvider {\n const baseURL =\n withoutTrailingSlash(options.baseURL) ?? 
'https://api.anthropic.com/v1';\n\n const getHeaders = () => ({\n 'anthropic-version': '2023-06-01',\n 'x-api-key': loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'ANTHROPIC_API_KEY',\n description: 'Anthropic',\n }),\n ...options.headers,\n });\n\n const createChatModel = (\n modelId: AnthropicMessagesModelId,\n settings: AnthropicMessagesSettings = {},\n ) =>\n new AnthropicMessagesLanguageModel(modelId, settings, {\n provider: 'anthropic.messages',\n baseURL,\n headers: getHeaders,\n fetch: options.fetch,\n supportsImageUrls: true,\n });\n\n const provider = function (\n modelId: AnthropicMessagesModelId,\n settings?: AnthropicMessagesSettings,\n ) {\n if (new.target) {\n throw new Error(\n 'The Anthropic model function cannot be called with the new keyword.',\n );\n }\n\n return createChatModel(modelId, settings);\n };\n\n provider.languageModel = createChatModel;\n provider.chat = createChatModel;\n provider.messages = createChatModel;\n provider.textEmbeddingModel = (modelId: string) => {\n throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' });\n };\n\n provider.tools = anthropicTools;\n\n return provider;\n}\n\n/**\nDefault Anthropic provider instance.\n */\nexport const anthropic = createAnthropic();\n","import {\n LanguageModelV1,\n LanguageModelV1CallWarning,\n LanguageModelV1FinishReason,\n LanguageModelV1FunctionToolCall,\n LanguageModelV1ProviderMetadata,\n LanguageModelV1StreamPart,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n ParseResult,\n Resolvable,\n combineHeaders,\n createEventSourceResponseHandler,\n createJsonResponseHandler,\n parseProviderOptions,\n postJsonToApi,\n resolve,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod';\nimport { anthropicFailedResponseHandler } from './anthropic-error';\nimport {\n AnthropicMessagesModelId,\n AnthropicMessagesSettings,\n} from './anthropic-messages-settings';\nimport { prepareTools } from './anthropic-prepare-tools';\nimport { convertToAnthropicMessagesPrompt } from './convert-to-anthropic-messages-prompt';\nimport { mapAnthropicStopReason } from './map-anthropic-stop-reason';\n\ntype AnthropicMessagesConfig = {\n provider: string;\n baseURL: string;\n headers: Resolvable<Record<string, string | undefined>>;\n supportsImageUrls: boolean;\n fetch?: FetchFunction;\n buildRequestUrl?: (baseURL: string, isStreaming: boolean) => string;\n transformRequestBody?: (args: Record<string, any>) => Record<string, any>;\n};\n\nexport class AnthropicMessagesLanguageModel implements LanguageModelV1 {\n readonly specificationVersion = 'v1';\n readonly defaultObjectGenerationMode = 'tool';\n\n readonly modelId: AnthropicMessagesModelId;\n readonly settings: AnthropicMessagesSettings;\n\n private readonly config: AnthropicMessagesConfig;\n\n constructor(\n modelId: AnthropicMessagesModelId,\n settings: AnthropicMessagesSettings,\n config: AnthropicMessagesConfig,\n ) {\n this.modelId = modelId;\n this.settings = settings;\n this.config = config;\n }\n\n supportsUrl(url: URL): boolean {\n return url.protocol === 'https:';\n }\n\n get provider(): string {\n return this.config.provider;\n }\n\n get supportsImageUrls(): boolean {\n return this.config.supportsImageUrls;\n }\n\n private async getArgs({\n mode,\n prompt,\n maxTokens = 4096, // 4096: max model output tokens TODO update default in v5\n temperature,\n topP,\n topK,\n frequencyPenalty,\n presencePenalty,\n stopSequences,\n responseFormat,\n seed,\n providerMetadata: providerOptions,\n }: 
Parameters<LanguageModelV1['doGenerate']>[0]) {\n const type = mode.type;\n\n const warnings: LanguageModelV1CallWarning[] = [];\n\n if (frequencyPenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'frequencyPenalty',\n });\n }\n\n if (presencePenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'presencePenalty',\n });\n }\n\n if (seed != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'seed',\n });\n }\n\n if (responseFormat != null && responseFormat.type !== 'text') {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'responseFormat',\n details: 'JSON response format is not supported.',\n });\n }\n\n const { prompt: messagesPrompt, betas: messagesBetas } =\n convertToAnthropicMessagesPrompt({\n prompt,\n sendReasoning: this.settings.sendReasoning ?? true,\n warnings,\n });\n\n const anthropicOptions = parseProviderOptions({\n provider: 'anthropic',\n providerOptions,\n schema: anthropicProviderOptionsSchema,\n });\n\n const isThinking = anthropicOptions?.thinking?.type === 'enabled';\n const thinkingBudget = anthropicOptions?.thinking?.budgetTokens;\n\n const baseArgs = {\n // model id:\n model: this.modelId,\n\n // standardized settings:\n max_tokens: maxTokens,\n temperature,\n top_k: topK,\n top_p: topP,\n stop_sequences: stopSequences,\n\n // provider specific settings:\n ...(isThinking && {\n thinking: { type: 'enabled', budget_tokens: thinkingBudget },\n }),\n\n // prompt:\n system: messagesPrompt.system,\n messages: messagesPrompt.messages,\n };\n\n if (isThinking) {\n if (thinkingBudget == null) {\n throw new UnsupportedFunctionalityError({\n functionality: 'thinking requires a budget',\n });\n }\n\n if (baseArgs.temperature != null) {\n baseArgs.temperature = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'temperature',\n details: 'temperature is not supported when thinking is enabled',\n });\n }\n\n if (topK != null) {\n baseArgs.top_k = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'topK',\n details: 'topK is not supported when thinking is enabled',\n });\n }\n\n if (topP != null) {\n baseArgs.top_p = undefined;\n warnings.push({\n type: 'unsupported-setting',\n setting: 'topP',\n details: 'topP is not supported when thinking is enabled',\n });\n }\n\n // adjust max tokens to account for thinking:\n baseArgs.max_tokens = maxTokens + thinkingBudget;\n }\n\n switch (type) {\n case 'regular': {\n const {\n tools,\n tool_choice,\n toolWarnings,\n betas: toolsBetas,\n } = prepareTools(mode);\n\n return {\n args: { ...baseArgs, tools, tool_choice },\n warnings: [...warnings, ...toolWarnings],\n betas: new Set([...messagesBetas, ...toolsBetas]),\n };\n }\n\n case 'object-json': {\n throw new UnsupportedFunctionalityError({\n functionality: 'json-mode object generation',\n });\n }\n\n case 'object-tool': {\n const { name, description, parameters } = mode.tool;\n\n return {\n args: {\n ...baseArgs,\n tools: [{ name, description, input_schema: parameters }],\n tool_choice: { type: 'tool', name },\n },\n warnings,\n betas: messagesBetas,\n };\n }\n\n default: {\n const _exhaustiveCheck: never = type;\n throw new Error(`Unsupported type: ${_exhaustiveCheck}`);\n }\n }\n }\n\n private async getHeaders({\n betas,\n headers,\n }: {\n betas: Set<string>;\n headers: Record<string, string | undefined> | undefined;\n }) {\n return combineHeaders(\n await resolve(this.config.headers),\n betas.size > 0 ? 
{ 'anthropic-beta': Array.from(betas).join(',') } : {},\n headers,\n );\n }\n\n private buildRequestUrl(isStreaming: boolean): string {\n return (\n this.config.buildRequestUrl?.(this.config.baseURL, isStreaming) ??\n `${this.config.baseURL}/messages`\n );\n }\n\n private transformRequestBody(args: Record<string, any>): Record<string, any> {\n return this.config.transformRequestBody?.(args) ?? args;\n }\n\n async doGenerate(\n options: Parameters<LanguageModelV1['doGenerate']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {\n const { args, warnings, betas } = await this.getArgs(options);\n\n const {\n responseHeaders,\n value: response,\n rawValue: rawResponse,\n } = await postJsonToApi({\n url: this.buildRequestUrl(false),\n headers: await this.getHeaders({ betas, headers: options.headers }),\n body: this.transformRequestBody(args),\n failedResponseHandler: anthropicFailedResponseHandler,\n successfulResponseHandler: createJsonResponseHandler(\n anthropicMessagesResponseSchema,\n ),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n const { messages: rawPrompt, ...rawSettings } = args;\n\n // extract text\n let text = '';\n for (const content of response.content) {\n if (content.type === 'text') {\n text += content.text;\n }\n }\n\n // extract tool calls\n let toolCalls: LanguageModelV1FunctionToolCall[] | undefined = undefined;\n if (response.content.some(content => content.type === 'tool_use')) {\n toolCalls = [];\n for (const content of response.content) {\n if (content.type === 'tool_use') {\n toolCalls.push({\n toolCallType: 'function',\n toolCallId: content.id,\n toolName: content.name,\n args: JSON.stringify(content.input),\n });\n }\n }\n }\n\n const reasoning = response.content\n .filter(\n content =>\n content.type === 'redacted_thinking' || content.type === 'thinking',\n )\n .map(content =>\n content.type === 'thinking'\n ? {\n type: 'text' as const,\n text: content.thinking,\n signature: content.signature,\n }\n : {\n type: 'redacted' as const,\n data: content.data,\n },\n );\n\n return {\n text,\n reasoning: reasoning.length > 0 ? reasoning : undefined,\n toolCalls,\n finishReason: mapAnthropicStopReason(response.stop_reason),\n usage: {\n promptTokens: response.usage.input_tokens,\n completionTokens: response.usage.output_tokens,\n },\n rawCall: { rawPrompt, rawSettings },\n rawResponse: {\n headers: responseHeaders,\n body: rawResponse,\n },\n response: {\n id: response.id ?? undefined,\n modelId: response.model ?? undefined,\n },\n warnings,\n providerMetadata: {\n anthropic: {\n cacheCreationInputTokens:\n response.usage.cache_creation_input_tokens ?? null,\n cacheReadInputTokens: response.usage.cache_read_input_tokens ?? 
null,\n },\n },\n request: { body: JSON.stringify(args) },\n };\n }\n\n async doStream(\n options: Parameters<LanguageModelV1['doStream']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {\n const { args, warnings, betas } = await this.getArgs(options);\n const body = { ...args, stream: true };\n\n const { responseHeaders, value: response } = await postJsonToApi({\n url: this.buildRequestUrl(true),\n headers: await this.getHeaders({ betas, headers: options.headers }),\n body: this.transformRequestBody(body),\n failedResponseHandler: anthropicFailedResponseHandler,\n successfulResponseHandler: createEventSourceResponseHandler(\n anthropicMessagesChunkSchema,\n ),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n const { messages: rawPrompt, ...rawSettings } = args;\n\n let finishReason: LanguageModelV1FinishReason = 'unknown';\n const usage: { promptTokens: number; completionTokens: number } = {\n promptTokens: Number.NaN,\n completionTokens: Number.NaN,\n };\n\n const toolCallContentBlocks: Record<\n number,\n {\n toolCallId: string;\n toolName: string;\n jsonText: string;\n }\n > = {};\n\n let providerMetadata: LanguageModelV1ProviderMetadata | undefined =\n undefined;\n\n let blockType:\n | 'text'\n | 'thinking'\n | 'tool_use'\n | 'redacted_thinking'\n | undefined = undefined;\n\n return {\n stream: response.pipeThrough(\n new TransformStream<\n ParseResult<z.infer<typeof anthropicMessagesChunkSchema>>,\n LanguageModelV1StreamPart\n >({\n transform(chunk, controller) {\n if (!chunk.success) {\n controller.enqueue({ type: 'error', error: chunk.error });\n return;\n }\n\n const value = chunk.value;\n\n switch (value.type) {\n case 'ping': {\n return; // ignored\n }\n\n case 'content_block_start': {\n const contentBlockType = value.content_block.type;\n\n blockType = contentBlockType;\n\n switch (contentBlockType) {\n case 'text':\n case 'thinking': {\n return; // ignored\n }\n\n case 'redacted_thinking': {\n controller.enqueue({\n type: 'redacted-reasoning',\n data: value.content_block.data,\n });\n return;\n }\n\n case 'tool_use': {\n toolCallContentBlocks[value.index] = {\n toolCallId: value.content_block.id,\n toolName: value.content_block.name,\n jsonText: '',\n };\n return;\n }\n\n default: {\n const _exhaustiveCheck: never = contentBlockType;\n throw new Error(\n `Unsupported content block type: ${_exhaustiveCheck}`,\n );\n }\n }\n }\n\n case 'content_block_stop': {\n // when finishing a tool call block, send the full tool call:\n if (toolCallContentBlocks[value.index] != null) {\n const contentBlock = toolCallContentBlocks[value.index];\n\n controller.enqueue({\n type: 'tool-call',\n toolCallType: 'function',\n toolCallId: contentBlock.toolCallId,\n toolName: contentBlock.toolName,\n args: contentBlock.jsonText,\n });\n\n delete toolCallContentBlocks[value.index];\n }\n\n blockType = undefined; // reset block type\n\n return;\n }\n\n case 'content_block_delta': {\n const deltaType = value.delta.type;\n switch (deltaType) {\n case 'text_delta': {\n controller.enqueue({\n type: 'text-delta',\n textDelta: value.delta.text,\n });\n\n return;\n }\n\n case 'thinking_delta': {\n controller.enqueue({\n type: 'reasoning',\n textDelta: value.delta.thinking,\n });\n\n return;\n }\n\n case 'signature_delta': {\n // signature are only supported on thinking blocks:\n if (blockType === 'thinking') {\n controller.enqueue({\n type: 'reasoning-signature',\n signature: value.delta.signature,\n });\n }\n\n return;\n }\n\n case 'input_json_delta': {\n const 
contentBlock = toolCallContentBlocks[value.index];\n\n controller.enqueue({\n type: 'tool-call-delta',\n toolCallType: 'function',\n toolCallId: contentBlock.toolCallId,\n toolName: contentBlock.toolName,\n argsTextDelta: value.delta.partial_json,\n });\n\n contentBlock.jsonText += value.delta.partial_json;\n\n return;\n }\n\n default: {\n const _exhaustiveCheck: never = deltaType;\n throw new Error(\n `Unsupported delta type: ${_exhaustiveCheck}`,\n );\n }\n }\n }\n\n case 'message_start': {\n usage.promptTokens = value.message.usage.input_tokens;\n usage.completionTokens = value.message.usage.output_tokens;\n\n providerMetadata = {\n anthropic: {\n cacheCreationInputTokens:\n value.message.usage.cache_creation_input_tokens ?? null,\n cacheReadInputTokens:\n value.message.usage.cache_read_input_tokens ?? null,\n },\n };\n\n controller.enqueue({\n type: 'response-metadata',\n id: value.message.id ?? undefined,\n modelId: value.message.model ?? undefined,\n });\n\n return;\n }\n\n case 'message_delta': {\n usage.completionTokens = value.usage.output_tokens;\n finishReason = mapAnthropicStopReason(value.delta.stop_reason);\n return;\n }\n\n case 'message_stop': {\n controller.enqueue({\n type: 'finish',\n finishReason,\n usage,\n providerMetadata,\n });\n return;\n }\n\n case 'error': {\n controller.enqueue({ type: 'error', error: value.error });\n return;\n }\n\n default: {\n const _exhaustiveCheck: never = value;\n throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);\n }\n }\n },\n }),\n ),\n rawCall: { rawPrompt, rawSettings },\n rawResponse: { headers: responseHeaders },\n warnings,\n request: { body: JSON.stringify(body) },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst anthropicMessagesResponseSchema = z.object({\n type: z.literal('message'),\n id: z.string().nullish(),\n model: z.string().nullish(),\n content: z.array(\n z.discriminatedUnion('type', [\n z.object({\n type: z.literal('text'),\n text: z.string(),\n }),\n z.object({\n type: z.literal('thinking'),\n thinking: z.string(),\n signature: z.string(),\n }),\n z.object({\n type: z.literal('redacted_thinking'),\n data: z.string(),\n }),\n z.object({\n type: z.literal('tool_use'),\n id: z.string(),\n name: z.string(),\n input: z.unknown(),\n }),\n ]),\n ),\n stop_reason: z.string().nullish(),\n usage: z.object({\n input_tokens: z.number(),\n output_tokens: z.number(),\n cache_creation_input_tokens: z.number().nullish(),\n cache_read_input_tokens: z.number().nullish(),\n }),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst anthropicMessagesChunkSchema = z.discriminatedUnion('type', [\n z.object({\n type: z.literal('message_start'),\n message: z.object({\n id: z.string().nullish(),\n model: z.string().nullish(),\n usage: z.object({\n input_tokens: z.number(),\n output_tokens: z.number(),\n cache_creation_input_tokens: z.number().nullish(),\n cache_read_input_tokens: z.number().nullish(),\n }),\n }),\n }),\n z.object({\n type: z.literal('content_block_start'),\n index: z.number(),\n content_block: z.discriminatedUnion('type', [\n z.object({\n type: z.literal('text'),\n text: z.string(),\n }),\n z.object({\n type: z.literal('thinking'),\n thinking: z.string(),\n }),\n z.object({\n type: z.literal('tool_use'),\n id: z.string(),\n name: z.string(),\n }),\n 
z.object({\n type: z.literal('redacted_thinking'),\n data: z.string(),\n }),\n ]),\n }),\n z.object({\n type: z.literal('content_block_delta'),\n index: z.number(),\n delta: z.discriminatedUnion('type', [\n z.object({\n type: z.literal('input_json_delta'),\n partial_json: z.string(),\n }),\n z.object({\n type: z.literal('text_delta'),\n text: z.string(),\n }),\n z.object({\n type: z.literal('thinking_delta'),\n thinking: z.string(),\n }),\n z.object({\n type: z.literal('signature_delta'),\n signature: z.string(),\n }),\n ]),\n }),\n z.object({\n type: z.literal('content_block_stop'),\n index: z.number(),\n }),\n z.object({\n type: z.literal('error'),\n error: z.object({\n type: z.string(),\n message: z.string(),\n }),\n }),\n z.object({\n type: z.literal('message_delta'),\n delta: z.object({ stop_reason: z.string().nullish() }),\n usage: z.object({ output_tokens: z.number() }),\n }),\n z.object({\n type: z.literal('message_stop'),\n }),\n z.object({\n type: z.literal('ping'),\n }),\n]);\n\nconst anthropicProviderOptionsSchema = z.object({\n thinking: z\n .object({\n type: z.union([z.literal('enabled'), z.literal('disabled')]),\n budgetTokens: z.number().optional(),\n })\n .optional(),\n});\n\nexport type AnthropicProviderOptions = z.infer<\n typeof anthropicProviderOptionsSchema\n>;\n","import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';\nimport { z } from 'zod';\n\nconst anthropicErrorDataSchema = z.object({\n type: z.literal('error'),\n error: z.object({\n type: z.string(),\n message: z.string(),\n }),\n});\n\nexport type AnthropicErrorData = z.infer<typeof anthropicErrorDataSchema>;\n\nexport const anthropicFailedResponseHandler = createJsonErrorResponseHandler({\n errorSchema: anthropicErrorDataSchema,\n errorToMessage: data => data.error.message,\n});\n","import {\n LanguageModelV1,\n LanguageModelV1CallWarning,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { AnthropicTool, AnthropicToolChoice } from './anthropic-api-types';\n\nexport function prepareTools(\n mode: Parameters<LanguageModelV1['doGenerate']>[0]['mode'] & {\n type: 'regular';\n },\n): {\n tools: Array<AnthropicTool> | undefined;\n tool_choice: AnthropicToolChoice | undefined;\n toolWarnings: LanguageModelV1CallWarning[];\n betas: Set<string>;\n} {\n // when the tools array is empty, change it to undefined to prevent errors:\n const tools = mode.tools?.length ? 
mode.tools : undefined;\n\n const toolWarnings: LanguageModelV1CallWarning[] = [];\n const betas = new Set<string>();\n\n if (tools == null) {\n return { tools: undefined, tool_choice: undefined, toolWarnings, betas };\n }\n\n const anthropicTools: AnthropicTool[] = [];\n\n for (const tool of tools) {\n switch (tool.type) {\n case 'function':\n anthropicTools.push({\n name: tool.name,\n description: tool.description,\n input_schema: tool.parameters,\n });\n break;\n case 'provider-defined':\n switch (tool.id) {\n case 'anthropic.computer_20250124':\n betas.add('computer-use-2025-01-24');\n anthropicTools.push({\n name: tool.name,\n type: 'computer_20250124',\n display_width_px: tool.args.displayWidthPx as number,\n display_height_px: tool.args.displayHeightPx as number,\n display_number: tool.args.displayNumber as number,\n });\n break;\n case 'anthropic.computer_20241022':\n betas.add('computer-use-2024-10-22');\n anthropicTools.push({\n name: tool.name,\n type: 'computer_20241022',\n display_width_px: tool.args.displayWidthPx as number,\n display_height_px: tool.args.displayHeightPx as number,\n display_number: tool.args.displayNumber as number,\n });\n break;\n case 'anthropic.text_editor_20250124':\n betas.add('computer-use-2025-01-24');\n anthropicTools.push({\n name: tool.name,\n type: 'text_editor_20250124',\n });\n break;\n case 'anthropic.text_editor_20241022':\n betas.add('computer-use-2024-10-22');\n anthropicTools.push({\n name: tool.name,\n type: 'text_editor_20241022',\n });\n break;\n case 'anthropic.bash_20250124':\n betas.add('computer-use-2025-01-24');\n anthropicTools.push({\n name: tool.name,\n type: 'bash_20250124',\n });\n break;\n case 'anthropic.bash_20241022':\n betas.add('computer-use-2024-10-22');\n anthropicTools.push({\n name: tool.name,\n type: 'bash_20241022',\n });\n break;\n default:\n toolWarnings.push({ type: 'unsupported-tool', tool });\n break;\n }\n break;\n default:\n toolWarnings.push({ type: 'unsupported-tool', tool });\n break;\n }\n }\n\n const toolChoice = mode.toolChoice;\n\n if (toolChoice == null) {\n return {\n tools: anthropicTools,\n tool_choice: undefined,\n toolWarnings,\n betas,\n };\n }\n\n const type = toolChoice.type;\n\n switch (type) {\n case 'auto':\n return {\n tools: anthropicTools,\n tool_choice: { type: 'auto' },\n toolWarnings,\n betas,\n };\n case 'required':\n return {\n tools: anthropicTools,\n tool_choice: { type: 'any' },\n toolWarnings,\n betas,\n };\n case 'none':\n // Anthropic does not support 'none' tool choice, so we remove the tools:\n return { tools: undefined, tool_choice: undefined, toolWarnings, betas };\n case 'tool':\n return {\n tools: anthropicTools,\n tool_choice: { type: 'tool', name: toolChoice.toolName },\n toolWarnings,\n betas,\n };\n default: {\n const _exhaustiveCheck: never = type;\n throw new UnsupportedFunctionalityError({\n functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`,\n });\n }\n }\n}\n","import {\n LanguageModelV1CallWarning,\n LanguageModelV1Message,\n LanguageModelV1Prompt,\n LanguageModelV1ProviderMetadata,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';\nimport {\n AnthropicAssistantMessage,\n AnthropicCacheControl,\n AnthropicMessagesPrompt,\n AnthropicUserMessage,\n} from './anthropic-api-types';\n\nexport function convertToAnthropicMessagesPrompt({\n prompt,\n sendReasoning,\n warnings,\n}: {\n prompt: LanguageModelV1Prompt;\n sendReasoning: boolean;\n warnings: 
LanguageModelV1CallWarning[];\n}): {\n prompt: AnthropicMessagesPrompt;\n betas: Set<string>;\n} {\n const betas = new Set<string>();\n const blocks = groupIntoBlocks(prompt);\n\n let system: AnthropicMessagesPrompt['system'] = undefined;\n const messages: AnthropicMessagesPrompt['messages'] = [];\n\n function getCacheControl(\n providerMetadata: LanguageModelV1ProviderMetadata | undefined,\n ): AnthropicCacheControl | undefined {\n const anthropic = providerMetadata?.anthropic;\n\n // allow both cacheControl and cache_control:\n const cacheControlValue =\n anthropic?.cacheControl ?? anthropic?.cache_control;\n\n // Pass through value assuming it is of the correct type.\n // The Anthropic API will validate the value.\n return cacheControlValue as AnthropicCacheControl | undefined;\n }\n\n for (let i = 0; i < blocks.length; i++) {\n const block = blocks[i];\n const isLastBlock = i === blocks.length - 1;\n const type = block.type;\n\n switch (type) {\n case 'system': {\n if (system != null) {\n throw new UnsupportedFunctionalityError({\n functionality:\n 'Multiple system messages that are separated by user/assistant messages',\n });\n }\n\n system = block.messages.map(({ content, providerMetadata }) => ({\n type: 'text',\n text: content,\n cache_control: getCacheControl(providerMetadata),\n }));\n\n break;\n }\n\n case 'user': {\n // combines all user and tool messages in this block into a single message:\n const anthropicContent: AnthropicUserMessage['content'] = [];\n\n for (const message of block.messages) {\n const { role, content } = message;\n switch (role) {\n case 'user': {\n for (let j = 0; j < content.length; j++) {\n const part = content[j];\n\n // cache control: first add cache control from part.\n // for the last part of a message,\n // check also if the message has cache control.\n const isLastPart = j === content.length - 1;\n\n const cacheControl =\n getCacheControl(part.providerMetadata) ??\n (isLastPart\n ? getCacheControl(message.providerMetadata)\n : undefined);\n\n switch (part.type) {\n case 'text': {\n anthropicContent.push({\n type: 'text',\n text: part.text,\n cache_control: cacheControl,\n });\n break;\n }\n\n case 'image': {\n anthropicContent.push({\n type: 'image',\n source:\n part.image instanceof URL\n ? {\n type: 'url',\n url: part.image.toString(),\n }\n : {\n type: 'base64',\n media_type: part.mimeType ?? 'image/jpeg',\n data: convertUint8ArrayToBase64(part.image),\n },\n cache_control: cacheControl,\n });\n\n break;\n }\n\n case 'file': {\n if (part.mimeType !== 'application/pdf') {\n throw new UnsupportedFunctionalityError({\n functionality: 'Non-PDF files in user messages',\n });\n }\n\n betas.add('pdfs-2024-09-25');\n\n anthropicContent.push({\n type: 'document',\n source:\n part.data instanceof URL\n ? {\n type: 'url',\n url: part.data.toString(),\n }\n : {\n type: 'base64',\n media_type: 'application/pdf',\n data: part.data,\n },\n cache_control: cacheControl,\n });\n\n break;\n }\n }\n }\n\n break;\n }\n case 'tool': {\n for (let i = 0; i < content.length; i++) {\n const part = content[i];\n\n // cache control: first add cache control from part.\n // for the last part of a message,\n // check also if the message has cache control.\n const isLastPart = i === content.length - 1;\n\n const cacheControl =\n getCacheControl(part.providerMetadata) ??\n (isLastPart\n ? getCacheControl(message.providerMetadata)\n : undefined);\n\n const toolResultContent =\n part.content != null\n ? 
part.content.map(part => {\n switch (part.type) {\n case 'text':\n return {\n type: 'text' as const,\n text: part.text,\n cache_control: undefined,\n };\n case 'image':\n return {\n type: 'image' as const,\n source: {\n type: 'base64' as const,\n media_type: part.mimeType ?? 'image/jpeg',\n data: part.data,\n },\n cache_control: undefined,\n };\n }\n })\n : JSON.stringify(part.result);\n\n anthropicContent.push({\n type: 'tool_result',\n tool_use_id: part.toolCallId,\n content: toolResultContent,\n is_error: part.isError,\n cache_control: cacheControl,\n });\n }\n\n break;\n }\n default: {\n const _exhaustiveCheck: never = role;\n throw new Error(`Unsupported role: ${_exhaustiveCheck}`);\n }\n }\n }\n\n messages.push({ role: 'user', content: anthropicContent });\n\n break;\n }\n\n case 'assistant': {\n // combines multiple assistant messages in this block into a single message:\n const anthropicContent: AnthropicAssistantMessage['content'] = [];\n\n for (let j = 0; j < block.messages.length; j++) {\n const message = block.messages[j];\n const isLastMessage = j === block.messages.length - 1;\n const { content } = message;\n\n for (let k = 0; k < content.length; k++) {\n const part = content[k];\n const isLastContentPart = k === content.length - 1;\n\n // cache control: first add cache control from part.\n // for the last part of a message,\n // check also if the message has cache control.\n const cacheControl =\n getCacheControl(part.providerMetadata) ??\n (isLastContentPart\n ? getCacheControl(message.providerMetadata)\n : undefined);\n\n switch (part.type) {\n case 'text': {\n anthropicContent.push({\n type: 'text',\n text:\n // trim the last text part if it's the last message in the block\n // because Anthropic does not allow trailing whitespace\n // in pre-filled assistant responses\n isLastBlock && isLastMessage && isLastContentPart\n ? 
part.text.trim()\n : part.text,\n\n cache_control: cacheControl,\n });\n break;\n }\n\n case 'reasoning': {\n if (sendReasoning) {\n anthropicContent.push({\n type: 'thinking',\n thinking: part.text,\n signature: part.signature!,\n cache_control: cacheControl,\n });\n } else {\n warnings.push({\n type: 'other',\n message:\n 'sending reasoning content is disabled for this model',\n });\n }\n break;\n }\n\n case 'redacted-reasoning': {\n anthropicContent.push({\n type: 'redacted_thinking',\n data: part.data,\n cache_control: cacheControl,\n });\n break;\n }\n\n case 'tool-call': {\n anthropicContent.push({\n type: 'tool_use',\n id: part.toolCallId,\n name: part.toolName,\n input: part.args,\n cache_control: cacheControl,\n });\n break;\n }\n }\n }\n }\n\n messages.push({ role: 'assistant', content: anthropicContent });\n\n break;\n }\n\n default: {\n const _exhaustiveCheck: never = type;\n throw new Error(`Unsupported type: ${_exhaustiveCheck}`);\n }\n }\n }\n\n return {\n prompt: { system, messages },\n betas,\n };\n}\n\ntype SystemBlock = {\n type: 'system';\n messages: Array<LanguageModelV1Message & { role: 'system' }>;\n};\ntype AssistantBlock = {\n type: 'assistant';\n messages: Array<LanguageModelV1Message & { role: 'assistant' }>;\n};\ntype UserBlock = {\n type: 'user';\n messages: Array<LanguageModelV1Message & { role: 'user' | 'tool' }>;\n};\n\nfunction groupIntoBlocks(\n prompt: LanguageModelV1Prompt,\n): Array<SystemBlock | AssistantBlock | UserBlock> {\n const blocks: Array<SystemBlock | AssistantBlock | UserBlock> = [];\n let currentBlock: SystemBlock | AssistantBlock | UserBlock | undefined =\n undefined;\n\n for (const message of prompt) {\n const { role } = message;\n switch (role) {\n case 'system': {\n if (currentBlock?.type !== 'system') {\n currentBlock = { type: 'system', messages: [] };\n blocks.push(currentBlock);\n }\n\n currentBlock.messages.push(message);\n break;\n }\n case 'assistant': {\n if (currentBlock?.type !== 'assistant') {\n currentBlock = { type: 'assistant', messages: [] };\n blocks.push(currentBlock);\n }\n\n currentBlock.messages.push(message);\n break;\n }\n case 'user': {\n if (currentBlock?.type !== 'user') {\n currentBlock = { type: 'user', messages: [] };\n blocks.push(currentBlock);\n }\n\n currentBlock.messages.push(message);\n break;\n }\n case 'tool': {\n if (currentBlock?.type !== 'user') {\n currentBlock = { type: 'user', messages: [] };\n blocks.push(currentBlock);\n }\n\n currentBlock.messages.push(message);\n break;\n }\n default: {\n const _exhaustiveCheck: never = role;\n throw new Error(`Unsupported role: ${_exhaustiveCheck}`);\n }\n }\n }\n\n return blocks;\n}\n","import { LanguageModelV1FinishReason } from '@ai-sdk/provider';\n\nexport function mapAnthropicStopReason(\n finishReason: string | null | undefined,\n): LanguageModelV1FinishReason {\n switch (finishReason) {\n case 'end_turn':\n case 'stop_sequence':\n return 'stop';\n case 'tool_use':\n return 'tool-calls';\n case 'max_tokens':\n return 'length';\n default:\n return 'unknown';\n }\n}\n","import { z } from 'zod';\n\n// Copied from ai package\ntype ExecuteFunction<PARAMETERS, RESULT> =\n | undefined\n | ((\n args: PARAMETERS,\n options: { abortSignal?: AbortSignal },\n ) => Promise<RESULT>);\n\n// Copied from ai package\nexport type ToolResultContent = Array<\n | {\n type: 'text';\n text: string;\n }\n | {\n type: 'image';\n data: string; // base64 encoded png image, e.g. screenshot\n mimeType?: string; // e.g. 
'image/png';\n }\n>;\n\nconst Bash20241022Parameters = z.object({\n command: z.string(),\n restart: z.boolean().optional(),\n});\n\n/**\n * Creates a tool for running a bash command. Must have name \"bash\".\n *\n * Image results are supported.\n *\n * @param execute - The function to execute the tool. Optional.\n */\nfunction bashTool_20241022<RESULT>(\n options: {\n execute?: ExecuteFunction<\n {\n /**\n * The bash command to run. Required unless the tool is being restarted.\n */\n command: string;\n\n /**\n * Specifying true will restart this tool. Otherwise, leave this unspecified.\n */\n restart?: boolean;\n },\n RESULT\n >;\n experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;\n } = {},\n): {\n type: 'provider-defined';\n id: 'anthropic.bash_20241022';\n args: {};\n parameters: typeof Bash20241022Parameters;\n execute: ExecuteFunction<z.infer<typeof Bash20241022Parameters>, RESULT>;\n experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;\n} {\n return {\n type: 'provider-defined',\n id: 'anthropic.bash_20241022',\n args: {},\n parameters: Bash20241022Parameters,\n execute: options.execute,\n experimental_toToolResultContent: options.experimental_toToolResultContent,\n };\n}\n\nconst Bash20250124Parameters = z.object({\n command: z.string(),\n restart: z.boolean().optional(),\n});\n\n/**\n * Creates a tool for running a bash command. Must have name \"bash\".\n *\n * Image results are supported.\n *\n * @param execute - The function to execute the tool. Optional.\n */\nfunction bashTool_20250124<RESULT>(\n options: {\n execute?: ExecuteFunction<\n {\n /**\n * The bash command to run. Required unless the tool is being restarted.\n */\n command: string;\n\n /**\n * Specifying true will restart this tool. Otherwise, leave this unspecified.\n */\n restart?: boolean;\n },\n RESULT\n >;\n experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;\n } = {},\n): {\n type: 'provider-defined';\n id: 'anthropic.bash_20250124';\n args: {};\n parameters: typeof Bash20250124Parameters;\n execute: ExecuteFunction<z.infer<typeof Bash20250124Parameters>, RESULT>;\n experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;\n} {\n return {\n type: 'provider-defined',\n id: 'anthropic.bash_20250124',\n args: {},\n parameters: Bash20250124Parameters,\n execute: options.execute,\n experimental_toToolResultContent: options.experimental_toToolResultContent,\n };\n}\n\nconst TextEditor20241022Parameters = z.object({\n command: z.enum(['view', 'create', 'str_replace', 'insert', 'undo_edit']),\n path: z.string(),\n file_text: z.string().optional(),\n insert_line: z.number().int().optional(),\n new_str: z.string().optional(),\n old_str: z.string().optional(),\n view_range: z.array(z.number().int()).optional(),\n});\n\n/**\n * Creates a tool for editing text. Must have name \"str_replace_editor\".\n *\n * Image results are supported.\n *\n * @param execute - The function to execute the tool. Optional.\n */\nfunction textEditorTool_20241022<RESULT>(\n options: {\n execute?: ExecuteFunction<\n {\n /**\n * The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.\n */\n command: 'view' | 'create' | 'str_replace' | 'insert' | 'undo_edit';\n\n /**\n * Absolute path to file or directory, e.g. 
`/repo/file.py` or `/repo`.\n */\n path: string;\n\n /**\n * Required parameter of `create` command, with the content of the file to be created.\n */\n file_text?: string;\n\n /**\n * Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.\n */\n insert_line?: number;\n\n /**\n * Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.\n */\n new_str?: string;\n\n /**\n * Required parameter of `str_replace` command containing the string in `path` to replace.\n */\n old_str?: string;\n\n /**\n * Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.\n */\n view_range?: number[];\n },\n RESULT\n >;\n experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;\n } = {},\n): {\n type: 'provider-defined';\n id: 'anthropic.text_editor_20241022';\n args: {};\n parameters: typeof TextEditor20241022Parameters;\n execute: ExecuteFunction<\n z.infer<typeof TextEditor20241022Parameters>,\n RESULT\n >;\n experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;\n} {\n return {\n type: 'provider-defined',\n id: 'anthropic.text_editor_20241022',\n args: {},\n parameters: TextEditor20241022Parameters,\n execute: options.execute,\n experimental_toToolResultContent: options.experimental_toToolResultContent,\n };\n}\n\nconst TextEditor20250124Parameters = z.object({\n command: z.enum(['view', 'create', 'str_replace', 'insert', 'undo_edit']),\n path: z.string(),\n file_text: z.string().optional(),\n insert_line: z.number().int().optional(),\n new_str: z.string().optional(),\n old_str: z.string().optional(),\n view_range: z.array(z.number().int()).optional(),\n});\n\n/**\n * Creates a tool for editing text. Must have name \"str_replace_editor\".\n *\n * Image results are supported.\n *\n * @param execute - The function to execute the tool. Optional.\n */\nfunction