@ai-sdk/google

The **[Google Generative AI provider](https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the [Google Generative AI](https://ai.google/discover/generativeai/) APIs.
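Getting started needs no configuration beyond an API key: the default `google` provider instance exported by this module reads the `GOOGLE_GENERATIVE_AI_API_KEY` environment variable. A minimal sketch, assuming a compatible version of the `ai` package for `generateText` and a current Gemini model id (any model id string is accepted):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// Authenticates via the GOOGLE_GENERATIVE_AI_API_KEY environment variable.
const { text } = await generateText({
  model: google('gemini-2.5-flash'), // model id is an assumption; any string is accepted
  prompt: 'Write a haiku about source maps.',
});

console.log(text);
```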

The published file is the package's compiled source map: a single line of JSON, 128 kB, with `"version": 3`, a `sources` list, and the original TypeScript embedded in `sourcesContent`. The `sources` array names 22 modules: `src/google-provider.ts`, `src/version.ts`, `src/google-generative-ai-embedding-model.ts`, `src/google-error.ts`, `src/google-generative-ai-embedding-options.ts`, `src/google-generative-ai-language-model.ts`, `src/convert-google-generative-ai-usage.ts`, `src/convert-json-schema-to-openapi-schema.ts`, `src/convert-to-google-generative-ai-messages.ts`, `src/get-model-path.ts`, `src/google-generative-ai-options.ts`, `src/google-prepare-tools.ts`, `src/map-google-generative-ai-finish-reason.ts`, the `src/tool/` modules (code execution, enterprise web search, file search, Google Maps, Google Search, URL context, Vertex RAG store), `src/google-tools.ts`, and `src/google-generative-ai-image-model.ts`. The embedded sources, as far as this excerpt reaches, follow.

**src/google-provider.ts**

```ts
import {
  EmbeddingModelV3,
  LanguageModelV3,
  ProviderV3,
  ImageModelV3,
} from '@ai-sdk/provider';
import {
  FetchFunction,
  generateId,
  loadApiKey,
  withoutTrailingSlash,
  withUserAgentSuffix,
} from '@ai-sdk/provider-utils';
import { VERSION } from './version';
import { GoogleGenerativeAIEmbeddingModel } from './google-generative-ai-embedding-model';
import { GoogleGenerativeAIEmbeddingModelId } from './google-generative-ai-embedding-options';
import { GoogleGenerativeAILanguageModel } from './google-generative-ai-language-model';
import { GoogleGenerativeAIModelId } from './google-generative-ai-options';
import { googleTools } from './google-tools';

import {
  GoogleGenerativeAIImageSettings,
  GoogleGenerativeAIImageModelId,
} from './google-generative-ai-image-settings';
import { GoogleGenerativeAIImageModel } from './google-generative-ai-image-model';

export interface GoogleGenerativeAIProvider extends ProviderV3 {
  (modelId: GoogleGenerativeAIModelId): LanguageModelV3;

  languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV3;

  chat(modelId: GoogleGenerativeAIModelId): LanguageModelV3;

  /**
Creates a model for image generation.
   */
  image(
    modelId: GoogleGenerativeAIImageModelId,
    settings?: GoogleGenerativeAIImageSettings,
  ): ImageModelV3;

  /**
   * @deprecated Use `chat()` instead.
   */
  generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV3;

  /**
   * Creates a model for text embeddings.
   */
  embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;

  /**
   * Creates a model for text embeddings.
   */
  embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;

  /**
   * @deprecated Use `embedding` instead.
   */
  textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;

  /**
   * @deprecated Use `embeddingModel` instead.
   */
  textEmbeddingModel(
    modelId: GoogleGenerativeAIEmbeddingModelId,
  ): EmbeddingModelV3;

  tools: typeof googleTools;
}

export interface GoogleGenerativeAIProviderSettings {
  /**
Use a different URL prefix for API calls, e.g. to use proxy servers.
The default prefix is `https://generativelanguage.googleapis.com/v1beta`.
   */
  baseURL?: string;

  /**
API key that is being sent using the `x-goog-api-key` header.
It defaults to the `GOOGLE_GENERATIVE_AI_API_KEY` environment variable.
   */
  apiKey?: string;

  /**
Custom headers to include in the requests.
   */
  headers?: Record<string, string | undefined>;

  /**
Custom fetch implementation. You can use it as a middleware to intercept requests,
or to provide a custom fetch implementation for e.g. testing.
   */
  fetch?: FetchFunction;

  /**
Optional function to generate a unique ID for each request.
   */
  generateId?: () => string;

  /**
   * Custom provider name.
   * Defaults to 'google.generative-ai'.
   */
  name?: string;
}

/**
Create a Google Generative AI provider instance.
 */
export function createGoogleGenerativeAI(
  options: GoogleGenerativeAIProviderSettings = {},
): GoogleGenerativeAIProvider {
  const baseURL =
    withoutTrailingSlash(options.baseURL) ??
    'https://generativelanguage.googleapis.com/v1beta';

  const providerName = options.name ?? 'google.generative-ai';

  const getHeaders = () =>
    withUserAgentSuffix(
      {
        'x-goog-api-key': loadApiKey({
          apiKey: options.apiKey,
          environmentVariableName: 'GOOGLE_GENERATIVE_AI_API_KEY',
          description: 'Google Generative AI',
        }),
        ...options.headers,
      },
      `ai-sdk/google/${VERSION}`,
    );

  const createChatModel = (modelId: GoogleGenerativeAIModelId) =>
    new GoogleGenerativeAILanguageModel(modelId, {
      provider: providerName,
      baseURL,
      headers: getHeaders,
      generateId: options.generateId ?? generateId,
      supportedUrls: () => ({
        '*': [
          // Google Generative Language "files" endpoint
          // e.g. https://generativelanguage.googleapis.com/v1beta/files/...
          new RegExp(`^${baseURL}/files/.*$`),
          // YouTube URLs (public or unlisted videos)
          new RegExp(
            `^https://(?:www\\.)?youtube\\.com/watch\\?v=[\\w-]+(?:&[\\w=&.-]*)?$`,
          ),
          new RegExp(`^https://youtu\\.be/[\\w-]+(?:\\?[\\w=&.-]*)?$`),
        ],
      }),
      fetch: options.fetch,
    });

  const createEmbeddingModel = (modelId: GoogleGenerativeAIEmbeddingModelId) =>
    new GoogleGenerativeAIEmbeddingModel(modelId, {
      provider: providerName,
      baseURL,
      headers: getHeaders,
      fetch: options.fetch,
    });

  const createImageModel = (
    modelId: GoogleGenerativeAIImageModelId,
    settings: GoogleGenerativeAIImageSettings = {},
  ) =>
    new GoogleGenerativeAIImageModel(modelId, settings, {
      provider: providerName,
      baseURL,
      headers: getHeaders,
      fetch: options.fetch,
    });

  const provider = function (modelId: GoogleGenerativeAIModelId) {
    if (new.target) {
      throw new Error(
        'The Google Generative AI model function cannot be called with the new keyword.',
      );
    }

    return createChatModel(modelId);
  };

  provider.specificationVersion = 'v3' as const;
  provider.languageModel = createChatModel;
  provider.chat = createChatModel;
  provider.generativeAI = createChatModel;
  provider.embedding = createEmbeddingModel;
  provider.embeddingModel = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.tools = googleTools;

  return provider as GoogleGenerativeAIProvider;
}

/**
Default Google Generative AI provider instance.
 */
export const google = createGoogleGenerativeAI();
```
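Because `createGoogleGenerativeAI` accepts `baseURL`, `apiKey`, and `headers`, a dedicated instance (for example, one that talks to a proxy) can be set up as sketched below; the proxy URL and the custom header are hypothetical:

```ts
import { createGoogleGenerativeAI } from '@ai-sdk/google';

const google = createGoogleGenerativeAI({
  // Hypothetical proxy; the default is
  // https://generativelanguage.googleapis.com/v1beta
  baseURL: 'https://llm-proxy.example.com/v1beta',
  apiKey: process.env.MY_GOOGLE_KEY, // otherwise GOOGLE_GENERATIVE_AI_API_KEY is used
  headers: { 'x-request-source': 'docs-example' }, // hypothetical custom header
});

const model = google.chat('gemini-2.5-pro'); // equivalent to calling google(...) directly
```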
**src/version.ts**

```ts
// Version string of this package injected at build time.
declare const __PACKAGE_VERSION__: string | undefined;
export const VERSION: string =
  typeof __PACKAGE_VERSION__ !== 'undefined'
    ? __PACKAGE_VERSION__
    : '0.0.0-test';
```
**src/google-generative-ai-embedding-model.ts**

```ts
import {
  EmbeddingModelV3,
  TooManyEmbeddingValuesForCallError,
} from '@ai-sdk/provider';
import {
  combineHeaders,
  createJsonResponseHandler,
  FetchFunction,
  lazySchema,
  parseProviderOptions,
  postJsonToApi,
  resolve,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
import { googleFailedResponseHandler } from './google-error';
import {
  GoogleGenerativeAIEmbeddingModelId,
  googleGenerativeAIEmbeddingProviderOptions,
} from './google-generative-ai-embedding-options';

type GoogleGenerativeAIEmbeddingConfig = {
  provider: string;
  baseURL: string;
  headers: () => Record<string, string | undefined>;
  fetch?: FetchFunction;
};

export class GoogleGenerativeAIEmbeddingModel implements EmbeddingModelV3 {
  readonly specificationVersion = 'v3';
  readonly modelId: GoogleGenerativeAIEmbeddingModelId;
  readonly maxEmbeddingsPerCall = 2048;
  readonly supportsParallelCalls = true;

  private readonly config: GoogleGenerativeAIEmbeddingConfig;

  get provider(): string {
    return this.config.provider;
  }

  constructor(
    modelId: GoogleGenerativeAIEmbeddingModelId,
    config: GoogleGenerativeAIEmbeddingConfig,
  ) {
    this.modelId = modelId;
    this.config = config;
  }

  async doEmbed({
    values,
    headers,
    abortSignal,
    providerOptions,
  }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<
    Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>
  > {
    // Parse provider options
    const googleOptions = await parseProviderOptions({
      provider: 'google',
      providerOptions,
      schema: googleGenerativeAIEmbeddingProviderOptions,
    });

    if (values.length > this.maxEmbeddingsPerCall) {
      throw new TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values,
      });
    }

    const mergedHeaders = combineHeaders(
      await resolve(this.config.headers),
      headers,
    );

    // For single embeddings, use the single endpoint (ratelimits, etc.)
    if (values.length === 1) {
      const {
        responseHeaders,
        value: response,
        rawValue,
      } = await postJsonToApi({
        url: `${this.config.baseURL}/models/${this.modelId}:embedContent`,
        headers: mergedHeaders,
        body: {
          model: `models/${this.modelId}`,
          content: {
            parts: [{ text: values[0] }],
          },
          outputDimensionality: googleOptions?.outputDimensionality,
          taskType: googleOptions?.taskType,
        },
        failedResponseHandler: googleFailedResponseHandler,
        successfulResponseHandler: createJsonResponseHandler(
          googleGenerativeAISingleEmbeddingResponseSchema,
        ),
        abortSignal,
        fetch: this.config.fetch,
      });

      return {
        warnings: [],
        embeddings: [response.embedding.values],
        usage: undefined,
        response: { headers: responseHeaders, body: rawValue },
      };
    }

    const {
      responseHeaders,
      value: response,
      rawValue,
    } = await postJsonToApi({
      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
      headers: mergedHeaders,
      body: {
        requests: values.map(value => ({
          model: `models/${this.modelId}`,
          content: { role: 'user', parts: [{ text: value }] },
          outputDimensionality: googleOptions?.outputDimensionality,
          taskType: googleOptions?.taskType,
        })),
      },
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        googleGenerativeAITextEmbeddingResponseSchema,
      ),
      abortSignal,
      fetch: this.config.fetch,
    });

    return {
      warnings: [],
      embeddings: response.embeddings.map(item => item.values),
      usage: undefined,
      response: { headers: responseHeaders, body: rawValue },
    };
  }
}

// minimal version of the schema, focused on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const googleGenerativeAITextEmbeddingResponseSchema = lazySchema(() =>
  zodSchema(
    z.object({
      embeddings: z.array(z.object({ values: z.array(z.number()) })),
    }),
  ),
);

// Schema for single embedding response
const googleGenerativeAISingleEmbeddingResponseSchema = lazySchema(() =>
  zodSchema(
    z.object({
      embedding: z.object({ values: z.array(z.number()) }),
    }),
  ),
);
```
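The embedding model above sends a single value to the `:embedContent` endpoint and batches multiple values through `:batchEmbedContents`, up to `maxEmbeddingsPerCall = 2048` per request. A usage sketch, assuming the `ai` package's `embed` and `embedMany` helpers:

```ts
import { google } from '@ai-sdk/google';
import { embed, embedMany } from 'ai';

// One value -> the :embedContent endpoint.
const { embedding } = await embed({
  model: google.embedding('text-embedding-004'),
  value: 'sunny day at the beach',
});

// Several values -> the :batchEmbedContents endpoint (max 2048 per call).
const { embeddings } = await embedMany({
  model: google.embedding('text-embedding-004'),
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});
```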
**src/google-error.ts**

```ts
import {
  createJsonErrorResponseHandler,
  type InferSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

const googleErrorDataSchema = lazySchema(() =>
  zodSchema(
    z.object({
      error: z.object({
        code: z.number().nullable(),
        message: z.string(),
        status: z.string(),
      }),
    }),
  ),
);

export type GoogleErrorData = InferSchema<typeof googleErrorDataSchema>;

export const googleFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: googleErrorDataSchema,
  errorToMessage: data => data.error.message,
});
```

**src/google-generative-ai-embedding-options.ts**

```ts
import {
  type InferSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

export type GoogleGenerativeAIEmbeddingModelId =
  | 'gemini-embedding-001'
  | 'text-embedding-004'
  | (string & {});

export const googleGenerativeAIEmbeddingProviderOptions = lazySchema(() =>
  zodSchema(
    z.object({
      /**
       * Optional. Reduced dimension for the output embedding.
       * If set, excessive values in the output embedding are truncated from the end.
       */
      outputDimensionality: z.number().optional(),

      /**
       * Optional. Specifies the task type for generating embeddings.
       * Supported task types:
       * - SEMANTIC_SIMILARITY: Optimized for text similarity.
       * - CLASSIFICATION: Optimized for text classification.
       * - CLUSTERING: Optimized for clustering texts based on similarity.
       * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
       * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
       * - QUESTION_ANSWERING: Optimized for answering questions.
       * - FACT_VERIFICATION: Optimized for verifying factual information.
       * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
       */
      taskType: z
        .enum([
          'SEMANTIC_SIMILARITY',
          'CLASSIFICATION',
          'CLUSTERING',
          'RETRIEVAL_DOCUMENT',
          'RETRIEVAL_QUERY',
          'QUESTION_ANSWERING',
          'FACT_VERIFICATION',
          'CODE_RETRIEVAL_QUERY',
        ])
        .optional(),
    }),
  ),
);

export type GoogleGenerativeAIEmbeddingProviderOptions = InferSchema<
  typeof googleGenerativeAIEmbeddingProviderOptions
>;
```
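Both options are passed per call under `providerOptions.google`. A sketch using the `taskType` and `outputDimensionality` fields defined above (the 768-dimension value is purely illustrative):

```ts
import { google } from '@ai-sdk/google';
import { embed } from 'ai';

const { embedding } = await embed({
  model: google.embedding('gemini-embedding-001'),
  value: 'What is the capital of France?',
  providerOptions: {
    google: {
      taskType: 'RETRIEVAL_QUERY', // optimized for query-based retrieval
      outputDimensionality: 768, // truncates the embedding from the end
    },
  },
});
```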
**src/google-generative-ai-language-model.ts**

```ts
import {
  LanguageModelV3,
  LanguageModelV3CallOptions,
  LanguageModelV3Content,
  LanguageModelV3FinishReason,
  LanguageModelV3GenerateResult,
  LanguageModelV3Source,
  LanguageModelV3StreamPart,
  LanguageModelV3StreamResult,
  SharedV3ProviderMetadata,
  SharedV3Warning,
} from '@ai-sdk/provider';
import {
  combineHeaders,
  createEventSourceResponseHandler,
  createJsonResponseHandler,
  FetchFunction,
  generateId,
  InferSchema,
  lazySchema,
  parseProviderOptions,
  ParseResult,
  postJsonToApi,
  Resolvable,
  resolve,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
import {
  convertGoogleGenerativeAIUsage,
  GoogleGenerativeAIUsageMetadata,
} from './convert-google-generative-ai-usage';
import { convertJSONSchemaToOpenAPISchema } from './convert-json-schema-to-openapi-schema';
import { convertToGoogleGenerativeAIMessages } from './convert-to-google-generative-ai-messages';
import { getModelPath } from './get-model-path';
import { googleFailedResponseHandler } from './google-error';
import {
  GoogleGenerativeAIModelId,
  googleGenerativeAIProviderOptions,
} from './google-generative-ai-options';
import { GoogleGenerativeAIContentPart } from './google-generative-ai-prompt';
import { prepareTools } from './google-prepare-tools';
import { mapGoogleGenerativeAIFinishReason } from './map-google-generative-ai-finish-reason';

type GoogleGenerativeAIConfig = {
  provider: string;
  baseURL: string;
  headers: Resolvable<Record<string, string | undefined>>;
  fetch?: FetchFunction;
  generateId: () => string;

  /**
   * The supported URLs for the model.
   */
  supportedUrls?: () => LanguageModelV3['supportedUrls'];
};

export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
  readonly specificationVersion = 'v3';

  readonly modelId: GoogleGenerativeAIModelId;

  private readonly config: GoogleGenerativeAIConfig;
  private readonly generateId: () => string;

  constructor(
    modelId: GoogleGenerativeAIModelId,
    config: GoogleGenerativeAIConfig,
  ) {
    this.modelId = modelId;
    this.config = config;
    this.generateId = config.generateId ?? generateId;
  }

  get provider(): string {
    return this.config.provider;
  }

  get supportedUrls() {
    return this.config.supportedUrls?.() ?? {};
  }

  private async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences,
    responseFormat,
    seed,
    tools,
    toolChoice,
    providerOptions,
  }: LanguageModelV3CallOptions) {
    const warnings: SharedV3Warning[] = [];

    const providerOptionsName = this.config.provider.includes('vertex')
      ? 'vertex'
      : 'google';
    let googleOptions = await parseProviderOptions({
      provider: providerOptionsName,
      providerOptions,
      schema: googleGenerativeAIProviderOptions,
    });

    if (googleOptions == null && providerOptionsName !== 'google') {
      googleOptions = await parseProviderOptions({
        provider: 'google',
        providerOptions,
        schema: googleGenerativeAIProviderOptions,
      });
    }

    // Add warning if Vertex rag tools are used with a non-Vertex Google provider
    if (
      tools?.some(
        tool =>
          tool.type === 'provider' && tool.id === 'google.vertex_rag_store',
      ) &&
      !this.config.provider.startsWith('google.vertex.')
    ) {
      warnings.push({
        type: 'other',
        message:
          "The 'vertex_rag_store' tool is only supported with the Google Vertex provider " +
          'and might not be supported or could behave unexpectedly with the current Google provider ' +
          `(${this.config.provider}).`,
      });
    }

    const isGemmaModel = this.modelId.toLowerCase().startsWith('gemma-');

    const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
      prompt,
      { isGemmaModel, providerOptionsName },
    );

    const {
      tools: googleTools,
      toolConfig: googleToolConfig,
      toolWarnings,
    } = prepareTools({
      tools,
      toolChoice,
      modelId: this.modelId,
    });

    return {
      args: {
        generationConfig: {
          // standardized settings:
          maxOutputTokens,
          temperature,
          topK,
          topP,
          frequencyPenalty,
          presencePenalty,
          stopSequences,
          seed,

          // response format:
          responseMimeType:
            responseFormat?.type === 'json' ? 'application/json' : undefined,
          responseSchema:
            responseFormat?.type === 'json' &&
            responseFormat.schema != null &&
            // Google GenAI does not support all OpenAPI Schema features,
            // so this is needed as an escape hatch:
            // TODO convert into provider option
            (googleOptions?.structuredOutputs ?? true)
              ? convertJSONSchemaToOpenAPISchema(responseFormat.schema)
              : undefined,
          ...(googleOptions?.audioTimestamp && {
            audioTimestamp: googleOptions.audioTimestamp,
          }),

          // provider options:
          responseModalities: googleOptions?.responseModalities,
          thinkingConfig: googleOptions?.thinkingConfig,
          ...(googleOptions?.mediaResolution && {
            mediaResolution: googleOptions.mediaResolution,
          }),
          ...(googleOptions?.imageConfig && {
            imageConfig: googleOptions.imageConfig,
          }),
        },
        contents,
        systemInstruction: isGemmaModel ? undefined : systemInstruction,
        safetySettings: googleOptions?.safetySettings,
        tools: googleTools,
        toolConfig: googleOptions?.retrievalConfig
          ? {
              ...googleToolConfig,
              retrievalConfig: googleOptions.retrievalConfig,
            }
          : googleToolConfig,
        cachedContent: googleOptions?.cachedContent,
        labels: googleOptions?.labels,
      },
      warnings: [...warnings, ...toolWarnings],
      providerOptionsName,
    };
  }

  async doGenerate(
    options: LanguageModelV3CallOptions,
  ): Promise<LanguageModelV3GenerateResult> {
    const { args, warnings, providerOptionsName } = await this.getArgs(options);

    const mergedHeaders = combineHeaders(
      await resolve(this.config.headers),
      options.headers,
    );

    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse,
    } = await postJsonToApi({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId,
      )}:generateContent`,
      headers: mergedHeaders,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(responseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    const candidate = response.candidates[0];
    const content: Array<LanguageModelV3Content> = [];

    // map ordered parts to content:
    const parts = candidate.content?.parts ?? [];

    const usageMetadata = response.usageMetadata;

    // Associates a code execution result with its preceding call.
    let lastCodeExecutionToolCallId: string | undefined;

    // Build content array from all parts
    for (const part of parts) {
      if ('executableCode' in part && part.executableCode?.code) {
        const toolCallId = this.config.generateId();
        lastCodeExecutionToolCallId = toolCallId;

        content.push({
          type: 'tool-call',
          toolCallId,
          toolName: 'code_execution',
          input: JSON.stringify(part.executableCode),
          providerExecuted: true,
        });
      } else if ('codeExecutionResult' in part && part.codeExecutionResult) {
        content.push({
          type: 'tool-result',
          // Assumes a result directly follows its corresponding call part.
          toolCallId: lastCodeExecutionToolCallId!,
          toolName: 'code_execution',
          result: {
            outcome: part.codeExecutionResult.outcome,
            output: part.codeExecutionResult.output,
          },
        });
        // Clear the ID after use to avoid accidental reuse.
        lastCodeExecutionToolCallId = undefined;
      } else if ('text' in part && part.text != null && part.text.length > 0) {
        content.push({
          type: part.thought === true ? 'reasoning' : 'text',
          text: part.text,
          providerMetadata: part.thoughtSignature
            ? {
                [providerOptionsName]: {
                  thoughtSignature: part.thoughtSignature,
                },
              }
            : undefined,
        });
      } else if ('functionCall' in part) {
        content.push({
          type: 'tool-call' as const,
          toolCallId: this.config.generateId(),
          toolName: part.functionCall.name,
          input: JSON.stringify(part.functionCall.args),
          providerMetadata: part.thoughtSignature
            ? {
                [providerOptionsName]: {
                  thoughtSignature: part.thoughtSignature,
                },
              }
            : undefined,
        });
      } else if ('inlineData' in part) {
        content.push({
          type: 'file' as const,
          data: part.inlineData.data,
          mediaType: part.inlineData.mimeType,
          providerMetadata: part.thoughtSignature
            ? {
                [providerOptionsName]: {
                  thoughtSignature: part.thoughtSignature,
                },
              }
            : undefined,
        });
      }
    }

    const sources =
      extractSources({
        groundingMetadata: candidate.groundingMetadata,
        generateId: this.config.generateId,
      }) ?? [];
    for (const source of sources) {
      content.push(source);
    }

    return {
      content,
      finishReason: {
        unified: mapGoogleGenerativeAIFinishReason({
          finishReason: candidate.finishReason,
          // Only count client-executed tool calls for finish reason determination.
          hasToolCalls: content.some(
            part => part.type === 'tool-call' && !part.providerExecuted,
          ),
        }),
        raw: candidate.finishReason ?? undefined,
      },
      usage: convertGoogleGenerativeAIUsage(usageMetadata),
      warnings,
      providerMetadata: {
        [providerOptionsName]: {
          promptFeedback: response.promptFeedback ?? null,
          groundingMetadata: candidate.groundingMetadata ?? null,
          urlContextMetadata: candidate.urlContextMetadata ?? null,
          safetyRatings: candidate.safetyRatings ?? null,
          usageMetadata: usageMetadata ?? null,
        },
      },
      request: { body: args },
      response: {
        // TODO timestamp, model id, id
        headers: responseHeaders,
        body: rawResponse,
      },
    };
  }

  async doStream(
    options: LanguageModelV3CallOptions,
  ): Promise<LanguageModelV3StreamResult> {
    const { args, warnings, providerOptionsName } = await this.getArgs(options);

    const headers = combineHeaders(
      await resolve(this.config.headers),
      options.headers,
    );

    const { responseHeaders, value: response } = await postJsonToApi({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId,
      )}:streamGenerateContent?alt=sse`,
      headers,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    let finishReason: LanguageModelV3FinishReason = {
      unified: 'other',
      raw: undefined,
    };
    let usage: GoogleGenerativeAIUsageMetadata | undefined = undefined;
    let providerMetadata: SharedV3ProviderMetadata | undefined = undefined;

    const generateId = this.config.generateId;
    let hasToolCalls = false;

    // Track active blocks to group consecutive parts of same type
    let currentTextBlockId: string | null = null;
    let currentReasoningBlockId: string | null = null;
    let blockCounter = 0;

    // Track emitted sources to prevent duplicates
    const emittedSourceUrls = new Set<string>();
    // Associates a code execution result with its preceding call.
    let lastCodeExecutionToolCallId: string | undefined;

    return {
      stream: response.pipeThrough(
        new TransformStream<
          ParseResult<ChunkSchema>,
          LanguageModelV3StreamPart
        >({
          start(controller) {
            controller.enqueue({ type: 'stream-start', warnings });
          },

          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
            }

            if (!chunk.success) {
              controller.enqueue({ type: 'error', error: chunk.error });
              return;
            }

            const value = chunk.value;

            const usageMetadata = value.usageMetadata;

            if (usageMetadata != null) {
              usage = usageMetadata;
            }

            const candidate = value.candidates?.[0];

            // sometimes the API returns an empty candidates array
            if (candidate == null) {
              return;
            }

            const content = candidate.content;

            const sources = extractSources({
              groundingMetadata: candidate.groundingMetadata,
              generateId,
            });
            if (sources != null) {
              for (const source of sources) {
                if (
                  source.sourceType === 'url' &&
                  !emittedSourceUrls.has(source.url)
                ) {
                  emittedSourceUrls.add(source.url);
                  controller.enqueue(source);
                }
              }
            }

            // Process tool call parts before determining finishReason to ensure hasToolCalls is properly set
            if (content != null) {
              // Process all parts in a single loop to preserve original order
              const parts = content.parts ?? [];
              for (const part of parts) {
                if ('executableCode' in part && part.executableCode?.code) {
                  const toolCallId = generateId();
                  lastCodeExecutionToolCallId = toolCallId;

                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId,
                    toolName: 'code_execution',
                    input: JSON.stringify(part.executableCode),
                    providerExecuted: true,
                  });
                } else if (
                  'codeExecutionResult' in part &&
                  part.codeExecutionResult
                ) {
                  // Assumes a result directly follows its corresponding call part.
                  const toolCallId = lastCodeExecutionToolCallId;

                  if (toolCallId) {
                    controller.enqueue({
                      type: 'tool-result',
                      toolCallId,
                      toolName: 'code_execution',
                      result: {
                        outcome: part.codeExecutionResult.outcome,
                        output: part.codeExecutionResult.output,
                      },
                    });
                    // Clear the ID after use.
                    lastCodeExecutionToolCallId = undefined;
                  }
                } else if (
                  'text' in part &&
                  part.text != null &&
                  part.text.length > 0
                ) {
                  if (part.thought === true) {
                    // End any active text block before starting reasoning
                    if (currentTextBlockId !== null) {
                      controller.enqueue({
                        type: 'text-end',
                        id: currentTextBlockId,
                      });
                      currentTextBlockId = null;
                    }

                    // Start new reasoning block if not already active
                    if (currentReasoningBlockId === null) {
                      currentReasoningBlockId = String(blockCounter++);
                      controller.enqueue({
                        type: 'reasoning-start',
                        id: currentReasoningBlockId,
                        providerMetadata: part.thoughtSignature
                          ? {
                              [providerOptionsName]: {
                                thoughtSignature: part.thoughtSignature,
                              },
                            }
                          : undefined,
                      });
                    }

                    controller.enqueue({
                      type: 'reasoning-delta',
                      id: currentReasoningBlockId,
                      delta: part.text,
                      providerMetadata: part.thoughtSignature
                        ? {
                            [providerOptionsName]: {
                              thoughtSignature: part.thoughtSignature,
                            },
                          }
                        : undefined,
                    });
                  } else {
                    // End any active reasoning block before starting text
                    if (currentReasoningBlockId !== null) {
                      controller.enqueue({
                        type: 'reasoning-end',
                        id: currentReasoningBlockId,
                      });
                      currentReasoningBlockId = null;
                    }

                    // Start new text block if not already active
                    if (currentTextBlockId === null) {
                      currentTextBlockId = String(blockCounter++);
                      controller.enqueue({
                        type: 'text-start',
                        id: currentTextBlockId,
                        providerMetadata: part.thoughtSignature
                          ? {
                              [providerOptionsName]: {
                                thoughtSignature: part.thoughtSignature,
                              },
                            }
                          : undefined,
                      });
                    }

                    controller.enqueue({
                      type: 'text-delta',
                      id: currentTextBlockId,
                      delta: part.text,
                      providerMetadata: part.thoughtSignature
                        ? {
                            [providerOptionsName]: {
                              thoughtSignature: part.thoughtSignature,
                            },
                          }
                        : undefined,
                    });
                  }
                } else if ('inlineData' in part) {
                  // Process file parts inline to preserve order with text
                  controller.enqueue({
                    type: 'file',
                    mediaType: part.inlineData.mimeType,
                    data: part.inlineData.data,
                  });
                }
              }

              const toolCallDeltas = getToolCallsFromParts({
                parts: content.parts,
                generateId,
                providerOptionsName,
              });

              if (toolCallDeltas != null) {
                for (const toolCall of toolCallDeltas) {
                  controller.enqueue({
                    type: 'tool-input-start',
                    id: toolCall.toolCallId,
                    toolName: toolCall.toolName,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  controller.enqueue({
                    type: 'tool-input-delta',
                    id: toolCall.toolCallId,
                    delta: toolCall.args,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  controller.enqueue({
                    type: 'tool-input-end',
                    id: toolCall.toolCallId,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId: toolCall.toolCallId,
                    toolName: toolCall.toolName,
                    input: toolCall.args,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  hasToolCalls = true;
                }
              }
            }

            if (candidate.finishReason != null) {
              finishReason = {
                unified: mapGoogleGenerativeAIFinishReason({
                  finishReason: candidate.finishReason,
                  hasToolCalls,
                }),
                raw: candidate.finishReason,
              };

              providerMetadata = {
                [providerOptionsName]: {
                  promptFeedback: value.promptFeedback ?? null,
                  groundingMetadata: candidate.groundingMetadata ?? null,
                  urlContextMetadata: candidate.urlContextMetadata ?? null,
                  safetyRatings: candidate.safetyRatings ?? null,
                },
              };
              if (usageMetadata != null) {
                (
                  providerMetadata[providerOptionsName] as Record<
                    string,
                    unknown
                  >
                ).usageMetadata = usageMetadata;
              }
            }
          },

          flush(controller) {
            // Close any open blocks before finishing
            if (currentTextBlockId !== null) {
              controller.enqueue({
                type: 'text-end',
                id: currentTextBlockId,
              });
            }
            if (currentReasoningBlockId !== null) {
              controller.enqueue({
                type: 'reasoning-end',
                id: currentReasoningBlockId,
              });
            }

            controller.enqueue({
              type: 'finish',
              finishReason,
              usage: convertGoogleGenerativeAIUsage(usage),
              providerMetadata,
            });
          },
        }),
      ),
      response: { headers: responseHeaders },
      request: { body: args },
    };
  }
}

function getToolCallsFromParts({
  parts,
  generateId,
  providerOptionsName,
}: {
  parts: ContentSchema['parts'];
  generateId: () => string;
  providerOptionsName: string;
}) {
  const functionCallParts = parts?.filter(
    part => 'functionCall' in part,
  ) as Array<
    GoogleGenerativeAIContentPart & {
      functionCall: { name: string; args: unknown };
      thoughtSignature?: string | null;
    }
  >;

  return functionCallParts == null || functionCallParts.length === 0
    ? undefined
    : functionCallParts.map(part => ({
        type: 'tool-call' as const,
        toolCallId: generateId(),
        toolName: part.functionCall.name,
        args: JSON.stringify(part.functionCall.args),
        providerMetadata: part.thoughtSignature
          ? {
              [providerOptionsName]: {
                thoughtSignature: part.thoughtSignature,
              },
            }
          : undefined,
      }));
}

function extractSources({
  groundingMetadata,
  generateId,
}: {
  groundingMetadata: GroundingMetadataSchema | undefined | null;
  generateId: () => string;
}): undefined | LanguageModelV3Source[] {
  if (!groundingMetadata?.groundingChunks) {
    return undefined;
  }

  const sources: LanguageModelV3Source[] = [];

  for (const chunk of groundingMetadata.groundingChunks) {
    if (chunk.web != null) {
      // Handle web chunks as URL sources
      sources.push({
        type: 'source',
        sourceType: 'url',
        id: generateId(),
        url: chunk.web.uri,
        title: chunk.web.title ?? undefined,
      });
    } else if (chunk.retrievedContext != null) {
      // Handle retrievedContext chunks from RAG operations
      const uri = chunk.retrievedContext.uri;
      const fileSearchStore = chunk.retrievedContext.fileSearchStore;

      if (uri && (uri.startsWith('http://') || uri.startsWith('https://'))) {
        // Old format: Google Search with HTTP/HTTPS URL
        sources.push({
          type: 'source',
          sourceType: 'url',
          id: generateId(),
          url: uri,
          title: chunk.retrievedContext.title ?? undefined,
        });
      } else if (uri) {
        // Old format: Document with file path (gs://, etc.)
        const title = chunk.retrievedContext.title ?? 'Unknown Document';
        let mediaType = 'application/octet-stream';
        let filename: string | undefined = undefined;

        if (uri.endsWith('.pdf')) {
          mediaType = 'application/pdf';
          filename = uri.split('/').pop();
        } else if (uri.endsWith('.txt')) {
          mediaType = 'text/plain';
          filename = uri.split('/').pop();
        } else if (uri.endsWith('.docx')) {
          mediaType =
            'application/vnd.openxmlformats-officedocument.wordprocessingml.document';
          filename = uri.split('/').pop();
        } else if (uri.endsWith('.doc')) {
          mediaType = 'application/msword';
          filename = uri.split('/').pop();
        } else if (uri.match(/\.(md|markdown)$/)) {
          mediaType = 'text/markdown';
          filename = uri.split('/').pop();
        } else {
          filename = uri.split('/').pop();
        }

        sources.push({
          type: 'source',
          sourceType: 'document',
          id: generateId(),
          mediaType,
          title,
          filename,
        });
      } else if (fileSearchStore) {
        // New format: File Search with fileSearchStore (no uri)
        const title = chunk.retrievedContext.title ?? 'Unknown Document';
        sources.push({
          type: 'source',
          sourceType: 'document',
          id: generateId(),
          mediaType: 'application/octet-stream',
          title,
          filename: fileSearchStore.split('/').pop(),
        });
      }
    } else if (chunk.maps != null) {
      if (chunk.maps.uri) {
        sources.push({
          type: 'source',
          sourceType: 'url',
          id: generateId(),
          url: chunk.maps.uri,
          title: chunk.maps.title ?? undefined,
        });
      }
    }
  }

  return sources.length > 0 ? sources : undefined;
}

export const getGroundingMetadataSchema = () =>
  z.object({
    webSearchQueries: z.array(z.string()).nullish(),
    retrievalQueries: z.array(z.string()).nullish(),
    searchEntryPoint: z.object({ renderedContent: z.string() }).nullish(),
    groundingChunks: z
      .array(
        z.object({
          web: z
            .object({ uri: z.string(), title: z.string().nullish() })
            .nullish(),
          retrievedContext: z
            .object({
              uri: z.string().nullish(),
              title: z.string().nullish(),
              text: z.string().nullish(),
              fileSearchStore: z.string().nullish(),
            })
            .nullish(),
          maps: z
            .object({
              uri: z.string().nullish(),
              title: z.string().nullish(),
              text: z.string().nullish(),
              placeId: z.string().nullish(),
            })
            .nullish(),
        }),
      )
      .nullish(),
    groundingSupports: z
      .array(
        z.object({
          segment: z.object({
            startIndex: z.number().nullish(),
            endIndex: z.number().nullish(),
            text: z.string().nullish(),
          }),
          segment_text: z.string().nullish(),
          groundingChunkIndices: z.array(z.number()).nullish(),
          supportChunkIndices: z.array(z.number()).nullish(),
          confidenceScores: z.array(z.number()).nullish(),
          confidenceScore: z.array(z.number()).nullish(),
        }),
      )
      .nullish(),
    retrievalMetadata: z
      .union([
        z.object({
          webDynamicRetrievalScore: z.number(),
        }),
        z.object({}),
      ])
      .nullish(),
  });

const getContentSchema = () =>
  z.object({
    parts: z
      .array(
        z.union([
          // note: order matters since text can be fully empty
          z.object({
            functionCall: z.object({
              name: z.string(),
              args: z.unknown(),
            }),
            thoughtSignature: z.string().nullish(),
          }),
          z.object({
            inlineData: z.object({
              mimeType: z.string(),
              data: z.string(),
            }),
            thoughtSignature: z.string().nullish(),
          }),
          z.object({
            executableCode: z
              .object({
                language: z.string(),
                code: z.string(),
              })
              .nullish(),
            codeExecutionResult: z
              .object({
                outcome: z.string(),
                output: z.string(),
              })
              .nullish(),
            text: z.string().nullish(),
            thought: z.boolean().nullish(),
            thoughtSignature: z.string().nullish(),
          }),
        ]),
      )
      .nullish(),
  });

// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters
const getSafetyRatingSchema = () =>
  z.object({
    category: z.string().nullish(),
    probability: z.string().nullish(),
    probabilityScore: z.number().nullish(),
    severity: z.string().nullish(),
    severityScore: z.number().nullish(),
    blocked: z.boolean().nullish(),
  });

const usageSchema = z.object({
  cachedContentTokenCount: z.number().nullish(),
  thoughtsTokenCount: z.number().nullish(),
  promptTokenCount: z.number().nullish(),
  candidatesTokenCount: z.number().nullish(),
  totalTokenCount: z.number().nullish(),
  // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
  trafficType: z.string().nullish(),
});

// https://ai.google.dev/api/generate-content#UrlRetrievalMetadata
export const getUrlContextMetadataSchema = () =>
  z.object({
    urlMetadata: z.array(
      z.object({
        retrievedUrl: z.string(),
        urlRetrievalStatus: z.string(),
      }),
    ),
  });

const responseSchema = lazySchema(() =>
  zodSchema(
    z.object({
      candidates: z.array(
        z.object({
          content: getContentSchema().nullish().or(z.object({}).strict()),
          finishReason: z.string().nullish(),
          safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
          groundingMetadata: getGroundingMetadataSchema().nullish(),
          urlContextMetadata: getUrlContextMetadataSchema().nullish(),
        }),
      ),
      usageMetadata: usageSchema.nullish(),
      promptFeedback: z
        .object({
          blockReason: z.string().nullish(),
          safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
        })
        .nullish(),
    }),
  ),
);

type ContentSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['content']
>;
export type GroundingMetadataSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']
>;

type GroundingChunkSchema = NonNullable<
  GroundingMetadataSchema['groundingChunks']
>[number];

export type UrlContextMetadataSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']
>;

export type SafetyRatingSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']
>[number];

// limited version of the schema, focused on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const chunkSchema = lazySchema(() =>
  zodSchema(
    z.object({
      candidates: z
        .array(
          z.object({
            content: getContentSchema().nullish(),
            finishReason: z.string().nullish(),
            safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
            groundingMetadata: getGroundingMetadataSchema().nullish(),
            urlContextMetadata: getUrlContextMetadataSchema().nullish(),
          }),
        )
        .nullish(),
      usageMetadata: usageSchema.nullish(),
      promptFeedback: z
        .object({
          blockReason: z.string().nullish(),
          safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
        })
        .nullish(),
    }),
  ),
);

type ChunkSchema = InferSchema<typeof chunkSchema>;
```
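In `doStream`, parts flagged as `thought` become `reasoning-start`/`reasoning-delta`/`reasoning-end` stream parts, while plain parts become `text-*` parts. A streaming sketch; note that the `thinkingConfig` field names are an assumption here, since the provider options schema (`google-generative-ai-options.ts`) is not part of this excerpt:

```ts
import { google } from '@ai-sdk/google';
import { streamText } from 'ai';

const result = streamText({
  model: google('gemini-2.5-flash'),
  prompt: 'Explain how source maps work.',
  providerOptions: {
    google: {
      // Assumed shape; see google-generative-ai-options.ts for the actual schema.
      thinkingConfig: { thinkingBudget: 1024, includeThoughts: true },
    },
  },
});

// Consume the plain text stream (reasoning parts are available via fullStream).
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```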
**src/convert-google-generative-ai-usage.ts**

```ts
import { LanguageModelV3Usage } from '@ai-sdk/provider';

export type GoogleGenerativeAIUsageMetadata = {
  promptTokenCount?: number | null;
  candidatesTokenCount?: number | null;
  totalTokenCount?: number | null;
  cachedContentTokenCount?: number | null;
  thoughtsTokenCount?: number | null;
  trafficType?: string | null;
};

export function convertGoogleGenerativeAIUsage(
  usage: GoogleGenerativeAIUsageMetadata | undefined | null,
): LanguageModelV3Usage {
  if (usage == null) {
    return {
      inputTokens: {
        total: undefined,
        noCache: undefined,
        cacheRead: undefined,
        cacheWrite: undefined,
      },
      outputTokens: {
        total: undefined,
        text: undefined,
        reasoning: undefined,
      },
      raw: undefined,
    };
  }

  const promptTokens = usage.promptTokenCount ?? 0;
  const candidatesTokens = usage.candidatesTokenCount ?? 0;
  const cachedContentTokens = usage.cachedContentTokenCount ?? 0;
  const thoughtsTokens = usage.thoughtsTokenCount ?? 0;

  return {
    inputTokens: {
      total: promptTokens,
      noCache: promptTokens - cachedContentTokens,
      cacheRead: cachedContentTokens,
      cacheWrite: undefined,
    },
    outputTokens: {
      total: candidatesTokens + thoughtsTokens,
      text: candidatesTokens,
      reasoning: thoughtsTokens,
    },
    raw: usage,
  };
}
```

**src/convert-json-schema-to-openapi-schema.ts** (the embedded source breaks off mid-file at this point)

```ts
import { JSONSchema7Definition } from '@ai-sdk/provider';

/**
 * Converts JSON Schema 7 to OpenAPI Schema 3.0
 */
export function convertJSONSchemaToOpenAPISchema(
  jsonSchema: JSONSchema7Definition | undefined,
  isRoot = true,
): unknown {
  // Handle empty object schemas: undefined at root, preserved when nested
  if (jsonSchema == null) {
    return undefined;
  }

  if (isEmptyObjectSchema(jsonSchema)) {
    if (is
```
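To make the token accounting in `convert-google-generative-ai-usage.ts` concrete: cached tokens are subtracted from the prompt count to produce `noCache`, and thought tokens are added to candidate tokens for the output total. A worked example with invented sample counts:

```ts
import { convertGoogleGenerativeAIUsage } from './convert-google-generative-ai-usage';

// Sample usageMetadata as the API would report it (numbers invented for illustration):
const usage = convertGoogleGenerativeAIUsage({
  promptTokenCount: 100,
  cachedContentTokenCount: 40,
  candidatesTokenCount: 20,
  thoughtsTokenCount: 5,
  totalTokenCount: 125,
});

// usage.inputTokens  -> { total: 100, noCache: 60, cacheRead: 40, cacheWrite: undefined }
// usage.outputTokens -> { total: 25, text: 20, reasoning: 5 }
```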