@ai-sdk/google
The **[Google Generative AI provider](https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the [Google Generative AI](https://ai.google/discover/generativeai/) APIs.
1 line • 103 kB
Source Map (JSON)
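The map below inlines the original TypeScript files under the standard source-map-v3 `sources`/`sourcesContent` fields. As a minimal sketch of how to recover those files from it, assuming the map has been saved locally as `index.js.map` (the file name is hypothetical):

```ts
import { readFileSync } from 'node:fs';

// Parse only the two source-map-v3 fields we need.
const map = JSON.parse(readFileSync('index.js.map', 'utf8')) as {
  sources: string[];
  sourcesContent?: string[];
};

// Print each embedded source file, keyed by its original path.
for (const [i, source] of map.sources.entries()) {
  console.log(`--- ${source} ---`);
  console.log(map.sourcesContent?.[i] ?? '(no embedded content)');
}
```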
{"version":3,"sources":["../../src/google-generative-ai-language-model.ts","../../src/convert-google-generative-ai-usage.ts","../../src/convert-json-schema-to-openapi-schema.ts","../../src/convert-to-google-generative-ai-messages.ts","../../src/get-model-path.ts","../../src/google-error.ts","../../src/google-generative-ai-options.ts","../../src/google-prepare-tools.ts","../../src/map-google-generative-ai-finish-reason.ts","../../src/tool/code-execution.ts","../../src/tool/enterprise-web-search.ts","../../src/tool/file-search.ts","../../src/tool/google-maps.ts","../../src/tool/google-search.ts","../../src/tool/url-context.ts","../../src/tool/vertex-rag-store.ts","../../src/google-tools.ts"],"sourcesContent":["import {\n LanguageModelV3,\n LanguageModelV3CallOptions,\n LanguageModelV3Content,\n LanguageModelV3FinishReason,\n LanguageModelV3GenerateResult,\n LanguageModelV3Source,\n LanguageModelV3StreamPart,\n LanguageModelV3StreamResult,\n SharedV3ProviderMetadata,\n SharedV3Warning,\n} from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createEventSourceResponseHandler,\n createJsonResponseHandler,\n FetchFunction,\n generateId,\n InferSchema,\n lazySchema,\n parseProviderOptions,\n ParseResult,\n postJsonToApi,\n Resolvable,\n resolve,\n zodSchema,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport {\n convertGoogleGenerativeAIUsage,\n GoogleGenerativeAIUsageMetadata,\n} from './convert-google-generative-ai-usage';\nimport { convertJSONSchemaToOpenAPISchema } from './convert-json-schema-to-openapi-schema';\nimport { convertToGoogleGenerativeAIMessages } from './convert-to-google-generative-ai-messages';\nimport { getModelPath } from './get-model-path';\nimport { googleFailedResponseHandler } from './google-error';\nimport {\n GoogleGenerativeAIModelId,\n googleGenerativeAIProviderOptions,\n} from './google-generative-ai-options';\nimport { GoogleGenerativeAIContentPart } from './google-generative-ai-prompt';\nimport { prepareTools } from './google-prepare-tools';\nimport { mapGoogleGenerativeAIFinishReason } from './map-google-generative-ai-finish-reason';\n\ntype GoogleGenerativeAIConfig = {\n provider: string;\n baseURL: string;\n headers: Resolvable<Record<string, string | undefined>>;\n fetch?: FetchFunction;\n generateId: () => string;\n\n /**\n * The supported URLs for the model.\n */\n supportedUrls?: () => LanguageModelV3['supportedUrls'];\n};\n\nexport class GoogleGenerativeAILanguageModel implements LanguageModelV3 {\n readonly specificationVersion = 'v3';\n\n readonly modelId: GoogleGenerativeAIModelId;\n\n private readonly config: GoogleGenerativeAIConfig;\n private readonly generateId: () => string;\n\n constructor(\n modelId: GoogleGenerativeAIModelId,\n config: GoogleGenerativeAIConfig,\n ) {\n this.modelId = modelId;\n this.config = config;\n this.generateId = config.generateId ?? generateId;\n }\n\n get provider(): string {\n return this.config.provider;\n }\n\n get supportedUrls() {\n return this.config.supportedUrls?.() ?? {};\n }\n\n private async getArgs({\n prompt,\n maxOutputTokens,\n temperature,\n topP,\n topK,\n frequencyPenalty,\n presencePenalty,\n stopSequences,\n responseFormat,\n seed,\n tools,\n toolChoice,\n providerOptions,\n }: LanguageModelV3CallOptions) {\n const warnings: SharedV3Warning[] = [];\n\n const providerOptionsName = this.config.provider.includes('vertex')\n ? 
'vertex'\n : 'google';\n let googleOptions = await parseProviderOptions({\n provider: providerOptionsName,\n providerOptions,\n schema: googleGenerativeAIProviderOptions,\n });\n\n if (googleOptions == null && providerOptionsName !== 'google') {\n googleOptions = await parseProviderOptions({\n provider: 'google',\n providerOptions,\n schema: googleGenerativeAIProviderOptions,\n });\n }\n\n // Add warning if Vertex rag tools are used with a non-Vertex Google provider\n if (\n tools?.some(\n tool =>\n tool.type === 'provider' && tool.id === 'google.vertex_rag_store',\n ) &&\n !this.config.provider.startsWith('google.vertex.')\n ) {\n warnings.push({\n type: 'other',\n message:\n \"The 'vertex_rag_store' tool is only supported with the Google Vertex provider \" +\n 'and might not be supported or could behave unexpectedly with the current Google provider ' +\n `(${this.config.provider}).`,\n });\n }\n\n const isGemmaModel = this.modelId.toLowerCase().startsWith('gemma-');\n\n const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(\n prompt,\n { isGemmaModel, providerOptionsName },\n );\n\n const {\n tools: googleTools,\n toolConfig: googleToolConfig,\n toolWarnings,\n } = prepareTools({\n tools,\n toolChoice,\n modelId: this.modelId,\n });\n\n return {\n args: {\n generationConfig: {\n // standardized settings:\n maxOutputTokens,\n temperature,\n topK,\n topP,\n frequencyPenalty,\n presencePenalty,\n stopSequences,\n seed,\n\n // response format:\n responseMimeType:\n responseFormat?.type === 'json' ? 'application/json' : undefined,\n responseSchema:\n responseFormat?.type === 'json' &&\n responseFormat.schema != null &&\n // Google GenAI does not support all OpenAPI Schema features,\n // so this is needed as an escape hatch:\n // TODO convert into provider option\n (googleOptions?.structuredOutputs ?? true)\n ? convertJSONSchemaToOpenAPISchema(responseFormat.schema)\n : undefined,\n ...(googleOptions?.audioTimestamp && {\n audioTimestamp: googleOptions.audioTimestamp,\n }),\n\n // provider options:\n responseModalities: googleOptions?.responseModalities,\n thinkingConfig: googleOptions?.thinkingConfig,\n ...(googleOptions?.mediaResolution && {\n mediaResolution: googleOptions.mediaResolution,\n }),\n ...(googleOptions?.imageConfig && {\n imageConfig: googleOptions.imageConfig,\n }),\n },\n contents,\n systemInstruction: isGemmaModel ? undefined : systemInstruction,\n safetySettings: googleOptions?.safetySettings,\n tools: googleTools,\n toolConfig: googleOptions?.retrievalConfig\n ? 
{\n ...googleToolConfig,\n retrievalConfig: googleOptions.retrievalConfig,\n }\n : googleToolConfig,\n cachedContent: googleOptions?.cachedContent,\n labels: googleOptions?.labels,\n },\n warnings: [...warnings, ...toolWarnings],\n providerOptionsName,\n };\n }\n\n async doGenerate(\n options: LanguageModelV3CallOptions,\n ): Promise<LanguageModelV3GenerateResult> {\n const { args, warnings, providerOptionsName } = await this.getArgs(options);\n\n const mergedHeaders = combineHeaders(\n await resolve(this.config.headers),\n options.headers,\n );\n\n const {\n responseHeaders,\n value: response,\n rawValue: rawResponse,\n } = await postJsonToApi({\n url: `${this.config.baseURL}/${getModelPath(\n this.modelId,\n )}:generateContent`,\n headers: mergedHeaders,\n body: args,\n failedResponseHandler: googleFailedResponseHandler,\n successfulResponseHandler: createJsonResponseHandler(responseSchema),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n const candidate = response.candidates[0];\n const content: Array<LanguageModelV3Content> = [];\n\n // map ordered parts to content:\n const parts = candidate.content?.parts ?? [];\n\n const usageMetadata = response.usageMetadata;\n\n // Associates a code execution result with its preceding call.\n let lastCodeExecutionToolCallId: string | undefined;\n\n // Build content array from all parts\n for (const part of parts) {\n if ('executableCode' in part && part.executableCode?.code) {\n const toolCallId = this.config.generateId();\n lastCodeExecutionToolCallId = toolCallId;\n\n content.push({\n type: 'tool-call',\n toolCallId,\n toolName: 'code_execution',\n input: JSON.stringify(part.executableCode),\n providerExecuted: true,\n });\n } else if ('codeExecutionResult' in part && part.codeExecutionResult) {\n content.push({\n type: 'tool-result',\n // Assumes a result directly follows its corresponding call part.\n toolCallId: lastCodeExecutionToolCallId!,\n toolName: 'code_execution',\n result: {\n outcome: part.codeExecutionResult.outcome,\n output: part.codeExecutionResult.output,\n },\n });\n // Clear the ID after use to avoid accidental reuse.\n lastCodeExecutionToolCallId = undefined;\n } else if ('text' in part && part.text != null && part.text.length > 0) {\n content.push({\n type: part.thought === true ? 'reasoning' : 'text',\n text: part.text,\n providerMetadata: part.thoughtSignature\n ? {\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n });\n } else if ('functionCall' in part) {\n content.push({\n type: 'tool-call' as const,\n toolCallId: this.config.generateId(),\n toolName: part.functionCall.name,\n input: JSON.stringify(part.functionCall.args),\n providerMetadata: part.thoughtSignature\n ? {\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n });\n } else if ('inlineData' in part) {\n content.push({\n type: 'file' as const,\n data: part.inlineData.data,\n mediaType: part.inlineData.mimeType,\n providerMetadata: part.thoughtSignature\n ? {\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n });\n }\n }\n\n const sources =\n extractSources({\n groundingMetadata: candidate.groundingMetadata,\n generateId: this.config.generateId,\n }) ?? 
[];\n for (const source of sources) {\n content.push(source);\n }\n\n return {\n content,\n finishReason: {\n unified: mapGoogleGenerativeAIFinishReason({\n finishReason: candidate.finishReason,\n // Only count client-executed tool calls for finish reason determination.\n hasToolCalls: content.some(\n part => part.type === 'tool-call' && !part.providerExecuted,\n ),\n }),\n raw: candidate.finishReason ?? undefined,\n },\n usage: convertGoogleGenerativeAIUsage(usageMetadata),\n warnings,\n providerMetadata: {\n [providerOptionsName]: {\n promptFeedback: response.promptFeedback ?? null,\n groundingMetadata: candidate.groundingMetadata ?? null,\n urlContextMetadata: candidate.urlContextMetadata ?? null,\n safetyRatings: candidate.safetyRatings ?? null,\n usageMetadata: usageMetadata ?? null,\n },\n },\n request: { body: args },\n response: {\n // TODO timestamp, model id, id\n headers: responseHeaders,\n body: rawResponse,\n },\n };\n }\n\n async doStream(\n options: LanguageModelV3CallOptions,\n ): Promise<LanguageModelV3StreamResult> {\n const { args, warnings, providerOptionsName } = await this.getArgs(options);\n\n const headers = combineHeaders(\n await resolve(this.config.headers),\n options.headers,\n );\n\n const { responseHeaders, value: response } = await postJsonToApi({\n url: `${this.config.baseURL}/${getModelPath(\n this.modelId,\n )}:streamGenerateContent?alt=sse`,\n headers,\n body: args,\n failedResponseHandler: googleFailedResponseHandler,\n successfulResponseHandler: createEventSourceResponseHandler(chunkSchema),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n let finishReason: LanguageModelV3FinishReason = {\n unified: 'other',\n raw: undefined,\n };\n let usage: GoogleGenerativeAIUsageMetadata | undefined = undefined;\n let providerMetadata: SharedV3ProviderMetadata | undefined = undefined;\n\n const generateId = this.config.generateId;\n let hasToolCalls = false;\n\n // Track active blocks to group consecutive parts of same type\n let currentTextBlockId: string | null = null;\n let currentReasoningBlockId: string | null = null;\n let blockCounter = 0;\n\n // Track emitted sources to prevent duplicates\n const emittedSourceUrls = new Set<string>();\n // Associates a code execution result with its preceding call.\n let lastCodeExecutionToolCallId: string | undefined;\n\n return {\n stream: response.pipeThrough(\n new TransformStream<\n ParseResult<ChunkSchema>,\n LanguageModelV3StreamPart\n >({\n start(controller) {\n controller.enqueue({ type: 'stream-start', warnings });\n },\n\n transform(chunk, controller) {\n if (options.includeRawChunks) {\n controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });\n }\n\n if (!chunk.success) {\n controller.enqueue({ type: 'error', error: chunk.error });\n return;\n }\n\n const value = chunk.value;\n\n const usageMetadata = value.usageMetadata;\n\n if (usageMetadata != null) {\n usage = usageMetadata;\n }\n\n const candidate = value.candidates?.[0];\n\n // sometimes the API returns an empty candidates array\n if (candidate == null) {\n return;\n }\n\n const content = candidate.content;\n\n const sources = extractSources({\n groundingMetadata: candidate.groundingMetadata,\n generateId,\n });\n if (sources != null) {\n for (const source of sources) {\n if (\n source.sourceType === 'url' &&\n !emittedSourceUrls.has(source.url)\n ) {\n emittedSourceUrls.add(source.url);\n controller.enqueue(source);\n }\n }\n }\n\n // Process tool call's parts before determining finishReason to ensure hasToolCalls is 
properly set\n if (content != null) {\n // Process all parts in a single loop to preserve original order\n const parts = content.parts ?? [];\n for (const part of parts) {\n if ('executableCode' in part && part.executableCode?.code) {\n const toolCallId = generateId();\n lastCodeExecutionToolCallId = toolCallId;\n\n controller.enqueue({\n type: 'tool-call',\n toolCallId,\n toolName: 'code_execution',\n input: JSON.stringify(part.executableCode),\n providerExecuted: true,\n });\n } else if (\n 'codeExecutionResult' in part &&\n part.codeExecutionResult\n ) {\n // Assumes a result directly follows its corresponding call part.\n const toolCallId = lastCodeExecutionToolCallId;\n\n if (toolCallId) {\n controller.enqueue({\n type: 'tool-result',\n toolCallId,\n toolName: 'code_execution',\n result: {\n outcome: part.codeExecutionResult.outcome,\n output: part.codeExecutionResult.output,\n },\n });\n // Clear the ID after use.\n lastCodeExecutionToolCallId = undefined;\n }\n } else if (\n 'text' in part &&\n part.text != null &&\n part.text.length > 0\n ) {\n if (part.thought === true) {\n // End any active text block before starting reasoning\n if (currentTextBlockId !== null) {\n controller.enqueue({\n type: 'text-end',\n id: currentTextBlockId,\n });\n currentTextBlockId = null;\n }\n\n // Start new reasoning block if not already active\n if (currentReasoningBlockId === null) {\n currentReasoningBlockId = String(blockCounter++);\n controller.enqueue({\n type: 'reasoning-start',\n id: currentReasoningBlockId,\n providerMetadata: part.thoughtSignature\n ? {\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n });\n }\n\n controller.enqueue({\n type: 'reasoning-delta',\n id: currentReasoningBlockId,\n delta: part.text,\n providerMetadata: part.thoughtSignature\n ? {\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n });\n } else {\n // End any active reasoning block before starting text\n if (currentReasoningBlockId !== null) {\n controller.enqueue({\n type: 'reasoning-end',\n id: currentReasoningBlockId,\n });\n currentReasoningBlockId = null;\n }\n\n // Start new text block if not already active\n if (currentTextBlockId === null) {\n currentTextBlockId = String(blockCounter++);\n controller.enqueue({\n type: 'text-start',\n id: currentTextBlockId,\n providerMetadata: part.thoughtSignature\n ? {\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n });\n }\n\n controller.enqueue({\n type: 'text-delta',\n id: currentTextBlockId,\n delta: part.text,\n providerMetadata: part.thoughtSignature\n ? 
{\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n });\n }\n } else if ('inlineData' in part) {\n // Process file parts inline to preserve order with text\n controller.enqueue({\n type: 'file',\n mediaType: part.inlineData.mimeType,\n data: part.inlineData.data,\n });\n }\n }\n\n const toolCallDeltas = getToolCallsFromParts({\n parts: content.parts,\n generateId,\n providerOptionsName,\n });\n\n if (toolCallDeltas != null) {\n for (const toolCall of toolCallDeltas) {\n controller.enqueue({\n type: 'tool-input-start',\n id: toolCall.toolCallId,\n toolName: toolCall.toolName,\n providerMetadata: toolCall.providerMetadata,\n });\n\n controller.enqueue({\n type: 'tool-input-delta',\n id: toolCall.toolCallId,\n delta: toolCall.args,\n providerMetadata: toolCall.providerMetadata,\n });\n\n controller.enqueue({\n type: 'tool-input-end',\n id: toolCall.toolCallId,\n providerMetadata: toolCall.providerMetadata,\n });\n\n controller.enqueue({\n type: 'tool-call',\n toolCallId: toolCall.toolCallId,\n toolName: toolCall.toolName,\n input: toolCall.args,\n providerMetadata: toolCall.providerMetadata,\n });\n\n hasToolCalls = true;\n }\n }\n }\n\n if (candidate.finishReason != null) {\n finishReason = {\n unified: mapGoogleGenerativeAIFinishReason({\n finishReason: candidate.finishReason,\n hasToolCalls,\n }),\n raw: candidate.finishReason,\n };\n\n providerMetadata = {\n [providerOptionsName]: {\n promptFeedback: value.promptFeedback ?? null,\n groundingMetadata: candidate.groundingMetadata ?? null,\n urlContextMetadata: candidate.urlContextMetadata ?? null,\n safetyRatings: candidate.safetyRatings ?? null,\n },\n };\n if (usageMetadata != null) {\n (\n providerMetadata[providerOptionsName] as Record<\n string,\n unknown\n >\n ).usageMetadata = usageMetadata;\n }\n }\n },\n\n flush(controller) {\n // Close any open blocks before finishing\n if (currentTextBlockId !== null) {\n controller.enqueue({\n type: 'text-end',\n id: currentTextBlockId,\n });\n }\n if (currentReasoningBlockId !== null) {\n controller.enqueue({\n type: 'reasoning-end',\n id: currentReasoningBlockId,\n });\n }\n\n controller.enqueue({\n type: 'finish',\n finishReason,\n usage: convertGoogleGenerativeAIUsage(usage),\n providerMetadata,\n });\n },\n }),\n ),\n response: { headers: responseHeaders },\n request: { body: args },\n };\n }\n}\n\nfunction getToolCallsFromParts({\n parts,\n generateId,\n providerOptionsName,\n}: {\n parts: ContentSchema['parts'];\n generateId: () => string;\n providerOptionsName: string;\n}) {\n const functionCallParts = parts?.filter(\n part => 'functionCall' in part,\n ) as Array<\n GoogleGenerativeAIContentPart & {\n functionCall: { name: string; args: unknown };\n thoughtSignature?: string | null;\n }\n >;\n\n return functionCallParts == null || functionCallParts.length === 0\n ? undefined\n : functionCallParts.map(part => ({\n type: 'tool-call' as const,\n toolCallId: generateId(),\n toolName: part.functionCall.name,\n args: JSON.stringify(part.functionCall.args),\n providerMetadata: part.thoughtSignature\n ? 
{\n [providerOptionsName]: {\n thoughtSignature: part.thoughtSignature,\n },\n }\n : undefined,\n }));\n}\n\nfunction extractSources({\n groundingMetadata,\n generateId,\n}: {\n groundingMetadata: GroundingMetadataSchema | undefined | null;\n generateId: () => string;\n}): undefined | LanguageModelV3Source[] {\n if (!groundingMetadata?.groundingChunks) {\n return undefined;\n }\n\n const sources: LanguageModelV3Source[] = [];\n\n for (const chunk of groundingMetadata.groundingChunks) {\n if (chunk.web != null) {\n // Handle web chunks as URL sources\n sources.push({\n type: 'source',\n sourceType: 'url',\n id: generateId(),\n url: chunk.web.uri,\n title: chunk.web.title ?? undefined,\n });\n } else if (chunk.retrievedContext != null) {\n // Handle retrievedContext chunks from RAG operations\n const uri = chunk.retrievedContext.uri;\n const fileSearchStore = chunk.retrievedContext.fileSearchStore;\n\n if (uri && (uri.startsWith('http://') || uri.startsWith('https://'))) {\n // Old format: Google Search with HTTP/HTTPS URL\n sources.push({\n type: 'source',\n sourceType: 'url',\n id: generateId(),\n url: uri,\n title: chunk.retrievedContext.title ?? undefined,\n });\n } else if (uri) {\n // Old format: Document with file path (gs://, etc.)\n const title = chunk.retrievedContext.title ?? 'Unknown Document';\n let mediaType = 'application/octet-stream';\n let filename: string | undefined = undefined;\n\n if (uri.endsWith('.pdf')) {\n mediaType = 'application/pdf';\n filename = uri.split('/').pop();\n } else if (uri.endsWith('.txt')) {\n mediaType = 'text/plain';\n filename = uri.split('/').pop();\n } else if (uri.endsWith('.docx')) {\n mediaType =\n 'application/vnd.openxmlformats-officedocument.wordprocessingml.document';\n filename = uri.split('/').pop();\n } else if (uri.endsWith('.doc')) {\n mediaType = 'application/msword';\n filename = uri.split('/').pop();\n } else if (uri.match(/\\.(md|markdown)$/)) {\n mediaType = 'text/markdown';\n filename = uri.split('/').pop();\n } else {\n filename = uri.split('/').pop();\n }\n\n sources.push({\n type: 'source',\n sourceType: 'document',\n id: generateId(),\n mediaType,\n title,\n filename,\n });\n } else if (fileSearchStore) {\n // New format: File Search with fileSearchStore (no uri)\n const title = chunk.retrievedContext.title ?? 'Unknown Document';\n sources.push({\n type: 'source',\n sourceType: 'document',\n id: generateId(),\n mediaType: 'application/octet-stream',\n title,\n filename: fileSearchStore.split('/').pop(),\n });\n }\n } else if (chunk.maps != null) {\n if (chunk.maps.uri) {\n sources.push({\n type: 'source',\n sourceType: 'url',\n id: generateId(),\n url: chunk.maps.uri,\n title: chunk.maps.title ?? undefined,\n });\n }\n }\n }\n\n return sources.length > 0 ? 
sources : undefined;\n}\n\nexport const getGroundingMetadataSchema = () =>\n z.object({\n webSearchQueries: z.array(z.string()).nullish(),\n retrievalQueries: z.array(z.string()).nullish(),\n searchEntryPoint: z.object({ renderedContent: z.string() }).nullish(),\n groundingChunks: z\n .array(\n z.object({\n web: z\n .object({ uri: z.string(), title: z.string().nullish() })\n .nullish(),\n retrievedContext: z\n .object({\n uri: z.string().nullish(),\n title: z.string().nullish(),\n text: z.string().nullish(),\n fileSearchStore: z.string().nullish(),\n })\n .nullish(),\n maps: z\n .object({\n uri: z.string().nullish(),\n title: z.string().nullish(),\n text: z.string().nullish(),\n placeId: z.string().nullish(),\n })\n .nullish(),\n }),\n )\n .nullish(),\n groundingSupports: z\n .array(\n z.object({\n segment: z.object({\n startIndex: z.number().nullish(),\n endIndex: z.number().nullish(),\n text: z.string().nullish(),\n }),\n segment_text: z.string().nullish(),\n groundingChunkIndices: z.array(z.number()).nullish(),\n supportChunkIndices: z.array(z.number()).nullish(),\n confidenceScores: z.array(z.number()).nullish(),\n confidenceScore: z.array(z.number()).nullish(),\n }),\n )\n .nullish(),\n retrievalMetadata: z\n .union([\n z.object({\n webDynamicRetrievalScore: z.number(),\n }),\n z.object({}),\n ])\n .nullish(),\n });\n\nconst getContentSchema = () =>\n z.object({\n parts: z\n .array(\n z.union([\n // note: order matters since text can be fully empty\n z.object({\n functionCall: z.object({\n name: z.string(),\n args: z.unknown(),\n }),\n thoughtSignature: z.string().nullish(),\n }),\n z.object({\n inlineData: z.object({\n mimeType: z.string(),\n data: z.string(),\n }),\n thoughtSignature: z.string().nullish(),\n }),\n z.object({\n executableCode: z\n .object({\n language: z.string(),\n code: z.string(),\n })\n .nullish(),\n codeExecutionResult: z\n .object({\n outcome: z.string(),\n output: z.string(),\n })\n .nullish(),\n text: z.string().nullish(),\n thought: z.boolean().nullish(),\n thoughtSignature: z.string().nullish(),\n }),\n ]),\n )\n .nullish(),\n });\n\n// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters\nconst getSafetyRatingSchema = () =>\n z.object({\n category: z.string().nullish(),\n probability: z.string().nullish(),\n probabilityScore: z.number().nullish(),\n severity: z.string().nullish(),\n severityScore: z.number().nullish(),\n blocked: z.boolean().nullish(),\n });\n\nconst usageSchema = z.object({\n cachedContentTokenCount: z.number().nullish(),\n thoughtsTokenCount: z.number().nullish(),\n promptTokenCount: z.number().nullish(),\n candidatesTokenCount: z.number().nullish(),\n totalTokenCount: z.number().nullish(),\n // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType\n trafficType: z.string().nullish(),\n});\n\n// https://ai.google.dev/api/generate-content#UrlRetrievalMetadata\nexport const getUrlContextMetadataSchema = () =>\n z.object({\n urlMetadata: z.array(\n z.object({\n retrievedUrl: z.string(),\n urlRetrievalStatus: z.string(),\n }),\n ),\n });\n\nconst responseSchema = lazySchema(() =>\n zodSchema(\n z.object({\n candidates: z.array(\n z.object({\n content: getContentSchema().nullish().or(z.object({}).strict()),\n finishReason: z.string().nullish(),\n safetyRatings: z.array(getSafetyRatingSchema()).nullish(),\n groundingMetadata: getGroundingMetadataSchema().nullish(),\n urlContextMetadata: getUrlContextMetadataSchema().nullish(),\n }),\n ),\n usageMetadata: 
usageSchema.nullish(),\n promptFeedback: z\n .object({\n blockReason: z.string().nullish(),\n safetyRatings: z.array(getSafetyRatingSchema()).nullish(),\n })\n .nullish(),\n }),\n ),\n);\n\ntype ContentSchema = NonNullable<\n InferSchema<typeof responseSchema>['candidates'][number]['content']\n>;\nexport type GroundingMetadataSchema = NonNullable<\n InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']\n>;\n\ntype GroundingChunkSchema = NonNullable<\n GroundingMetadataSchema['groundingChunks']\n>[number];\n\nexport type UrlContextMetadataSchema = NonNullable<\n InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']\n>;\n\nexport type SafetyRatingSchema = NonNullable<\n InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']\n>[number];\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst chunkSchema = lazySchema(() =>\n zodSchema(\n z.object({\n candidates: z\n .array(\n z.object({\n content: getContentSchema().nullish(),\n finishReason: z.string().nullish(),\n safetyRatings: z.array(getSafetyRatingSchema()).nullish(),\n groundingMetadata: getGroundingMetadataSchema().nullish(),\n urlContextMetadata: getUrlContextMetadataSchema().nullish(),\n }),\n )\n .nullish(),\n usageMetadata: usageSchema.nullish(),\n promptFeedback: z\n .object({\n blockReason: z.string().nullish(),\n safetyRatings: z.array(getSafetyRatingSchema()).nullish(),\n })\n .nullish(),\n }),\n ),\n);\n\ntype ChunkSchema = InferSchema<typeof chunkSchema>;\n","import { LanguageModelV3Usage } from '@ai-sdk/provider';\n\nexport type GoogleGenerativeAIUsageMetadata = {\n promptTokenCount?: number | null;\n candidatesTokenCount?: number | null;\n totalTokenCount?: number | null;\n cachedContentTokenCount?: number | null;\n thoughtsTokenCount?: number | null;\n trafficType?: string | null;\n};\n\nexport function convertGoogleGenerativeAIUsage(\n usage: GoogleGenerativeAIUsageMetadata | undefined | null,\n): LanguageModelV3Usage {\n if (usage == null) {\n return {\n inputTokens: {\n total: undefined,\n noCache: undefined,\n cacheRead: undefined,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: undefined,\n text: undefined,\n reasoning: undefined,\n },\n raw: undefined,\n };\n }\n\n const promptTokens = usage.promptTokenCount ?? 0;\n const candidatesTokens = usage.candidatesTokenCount ?? 0;\n const cachedContentTokens = usage.cachedContentTokenCount ?? 0;\n const thoughtsTokens = usage.thoughtsTokenCount ?? 
0;\n\n return {\n inputTokens: {\n total: promptTokens,\n noCache: promptTokens - cachedContentTokens,\n cacheRead: cachedContentTokens,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: candidatesTokens + thoughtsTokens,\n text: candidatesTokens,\n reasoning: thoughtsTokens,\n },\n raw: usage,\n };\n}\n","import { JSONSchema7Definition } from '@ai-sdk/provider';\n\n/**\n * Converts JSON Schema 7 to OpenAPI Schema 3.0\n */\nexport function convertJSONSchemaToOpenAPISchema(\n jsonSchema: JSONSchema7Definition | undefined,\n isRoot = true,\n): unknown {\n // Handle empty object schemas: undefined at root, preserved when nested\n if (jsonSchema == null) {\n return undefined;\n }\n\n if (isEmptyObjectSchema(jsonSchema)) {\n if (isRoot) {\n return undefined;\n }\n\n if (typeof jsonSchema === 'object' && jsonSchema.description) {\n return { type: 'object', description: jsonSchema.description };\n }\n return { type: 'object' };\n }\n\n if (typeof jsonSchema === 'boolean') {\n return { type: 'boolean', properties: {} };\n }\n\n const {\n type,\n description,\n required,\n properties,\n items,\n allOf,\n anyOf,\n oneOf,\n format,\n const: constValue,\n minLength,\n enum: enumValues,\n } = jsonSchema;\n\n const result: Record<string, unknown> = {};\n\n if (description) result.description = description;\n if (required) result.required = required;\n if (format) result.format = format;\n\n if (constValue !== undefined) {\n result.enum = [constValue];\n }\n\n // Handle type\n if (type) {\n if (Array.isArray(type)) {\n const hasNull = type.includes('null');\n const nonNullTypes = type.filter(t => t !== 'null');\n\n if (nonNullTypes.length === 0) {\n // Only null type\n result.type = 'null';\n } else {\n // One or more non-null types: always use anyOf\n result.anyOf = nonNullTypes.map(t => ({ type: t }));\n if (hasNull) {\n result.nullable = true;\n }\n }\n } else {\n result.type = type;\n }\n }\n\n // Handle enum\n if (enumValues !== undefined) {\n result.enum = enumValues;\n }\n\n if (properties != null) {\n result.properties = Object.entries(properties).reduce(\n (acc, [key, value]) => {\n acc[key] = convertJSONSchemaToOpenAPISchema(value, false);\n return acc;\n },\n {} as Record<string, unknown>,\n );\n }\n\n if (items) {\n result.items = Array.isArray(items)\n ? 
items.map(item => convertJSONSchemaToOpenAPISchema(item, false))\n : convertJSONSchemaToOpenAPISchema(items, false);\n }\n\n if (allOf) {\n result.allOf = allOf.map(item =>\n convertJSONSchemaToOpenAPISchema(item, false),\n );\n }\n if (anyOf) {\n // Handle cases where anyOf includes a null type\n if (\n anyOf.some(\n schema => typeof schema === 'object' && schema?.type === 'null',\n )\n ) {\n const nonNullSchemas = anyOf.filter(\n schema => !(typeof schema === 'object' && schema?.type === 'null'),\n );\n\n if (nonNullSchemas.length === 1) {\n // If there's only one non-null schema, convert it and make it nullable\n const converted = convertJSONSchemaToOpenAPISchema(\n nonNullSchemas[0],\n false,\n );\n if (typeof converted === 'object') {\n result.nullable = true;\n Object.assign(result, converted);\n }\n } else {\n // If there are multiple non-null schemas, keep them in anyOf\n result.anyOf = nonNullSchemas.map(item =>\n convertJSONSchemaToOpenAPISchema(item, false),\n );\n result.nullable = true;\n }\n } else {\n result.anyOf = anyOf.map(item =>\n convertJSONSchemaToOpenAPISchema(item, false),\n );\n }\n }\n if (oneOf) {\n result.oneOf = oneOf.map(item =>\n convertJSONSchemaToOpenAPISchema(item, false),\n );\n }\n\n if (minLength !== undefined) {\n result.minLength = minLength;\n }\n\n return result;\n}\n\nfunction isEmptyObjectSchema(jsonSchema: JSONSchema7Definition): boolean {\n return (\n jsonSchema != null &&\n typeof jsonSchema === 'object' &&\n jsonSchema.type === 'object' &&\n (jsonSchema.properties == null ||\n Object.keys(jsonSchema.properties).length === 0) &&\n !jsonSchema.additionalProperties\n );\n}\n","import {\n LanguageModelV3Prompt,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport {\n GoogleGenerativeAIContent,\n GoogleGenerativeAIContentPart,\n GoogleGenerativeAIPrompt,\n} from './google-generative-ai-prompt';\nimport { convertToBase64 } from '@ai-sdk/provider-utils';\n\nexport function convertToGoogleGenerativeAIMessages(\n prompt: LanguageModelV3Prompt,\n options?: { isGemmaModel?: boolean; providerOptionsName?: string },\n): GoogleGenerativeAIPrompt {\n const systemInstructionParts: Array<{ text: string }> = [];\n const contents: Array<GoogleGenerativeAIContent> = [];\n let systemMessagesAllowed = true;\n const isGemmaModel = options?.isGemmaModel ?? false;\n const providerOptionsName = options?.providerOptionsName ?? 'google';\n\n for (const { role, content } of prompt) {\n switch (role) {\n case 'system': {\n if (!systemMessagesAllowed) {\n throw new UnsupportedFunctionalityError({\n functionality:\n 'system messages are only supported at the beginning of the conversation',\n });\n }\n\n systemInstructionParts.push({ text: content });\n break;\n }\n\n case 'user': {\n systemMessagesAllowed = false;\n\n const parts: GoogleGenerativeAIContentPart[] = [];\n\n for (const part of content) {\n switch (part.type) {\n case 'text': {\n parts.push({ text: part.text });\n break;\n }\n\n case 'file': {\n // default to image/jpeg for unknown image/* types\n const mediaType =\n part.mediaType === 'image/*' ? 'image/jpeg' : part.mediaType;\n\n parts.push(\n part.data instanceof URL\n ? 
{\n fileData: {\n mimeType: mediaType,\n fileUri: part.data.toString(),\n },\n }\n : {\n inlineData: {\n mimeType: mediaType,\n data: convertToBase64(part.data),\n },\n },\n );\n\n break;\n }\n }\n }\n\n contents.push({ role: 'user', parts });\n break;\n }\n\n case 'assistant': {\n systemMessagesAllowed = false;\n\n contents.push({\n role: 'model',\n parts: content\n .map(part => {\n const providerOpts = part.providerOptions?.[providerOptionsName];\n const thoughtSignature =\n providerOpts?.thoughtSignature != null\n ? String(providerOpts.thoughtSignature)\n : undefined;\n\n switch (part.type) {\n case 'text': {\n return part.text.length === 0\n ? undefined\n : {\n text: part.text,\n thoughtSignature,\n };\n }\n\n case 'reasoning': {\n return part.text.length === 0\n ? undefined\n : {\n text: part.text,\n thought: true,\n thoughtSignature,\n };\n }\n\n case 'file': {\n if (part.data instanceof URL) {\n throw new UnsupportedFunctionalityError({\n functionality:\n 'File data URLs in assistant messages are not supported',\n });\n }\n\n return {\n inlineData: {\n mimeType: part.mediaType,\n data: convertToBase64(part.data),\n },\n thoughtSignature,\n };\n }\n\n case 'tool-call': {\n return {\n functionCall: {\n name: part.toolName,\n args: part.input,\n },\n thoughtSignature,\n };\n }\n }\n })\n .filter(part => part !== undefined),\n });\n break;\n }\n\n case 'tool': {\n systemMessagesAllowed = false;\n\n const parts: GoogleGenerativeAIContentPart[] = [];\n\n for (const part of content) {\n if (part.type === 'tool-approval-response') {\n continue;\n }\n const output = part.output;\n\n if (output.type === 'content') {\n for (const contentPart of output.value) {\n switch (contentPart.type) {\n case 'text':\n parts.push({\n functionResponse: {\n name: part.toolName,\n response: {\n name: part.toolName,\n content: contentPart.text,\n },\n },\n });\n break;\n case 'image-data':\n parts.push(\n {\n inlineData: {\n mimeType: contentPart.mediaType,\n data: contentPart.data,\n },\n },\n {\n text: 'Tool executed successfully and returned this image as a response',\n },\n );\n break;\n default:\n parts.push({ text: JSON.stringify(contentPart) });\n break;\n }\n }\n } else {\n parts.push({\n functionResponse: {\n name: part.toolName,\n response: {\n name: part.toolName,\n content:\n output.type === 'execution-denied'\n ? (output.reason ?? 'Tool execution denied.')\n : output.value,\n },\n },\n });\n }\n }\n\n contents.push({\n role: 'user',\n parts,\n });\n break;\n }\n }\n }\n\n if (\n isGemmaModel &&\n systemInstructionParts.length > 0 &&\n contents.length > 0 &&\n contents[0].role === 'user'\n ) {\n const systemText = systemInstructionParts\n .map(part => part.text)\n .join('\\n\\n');\n\n contents[0].parts.unshift({ text: systemText + '\\n\\n' });\n }\n\n return {\n systemInstruction:\n systemInstructionParts.length > 0 && !isGemmaModel\n ? { parts: systemInstructionParts }\n : undefined,\n contents,\n };\n}\n","export function getModelPath(modelId: string): string {\n return modelId.includes('/') ? 
modelId : `models/${modelId}`;\n}\n","import {\n createJsonErrorResponseHandler,\n type InferSchema,\n lazySchema,\n zodSchema,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\n\nconst googleErrorDataSchema = lazySchema(() =>\n zodSchema(\n z.object({\n error: z.object({\n code: z.number().nullable(),\n message: z.string(),\n status: z.string(),\n }),\n }),\n ),\n);\n\nexport type GoogleErrorData = InferSchema<typeof googleErrorDataSchema>;\n\nexport const googleFailedResponseHandler = createJsonErrorResponseHandler({\n errorSchema: googleErrorDataSchema,\n errorToMessage: data => data.error.message,\n});\n","import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\n\nexport type GoogleGenerativeAIModelId =\n // Stable models\n // https://ai.google.dev/gemini-api/docs/models/gemini\n | 'gemini-1.5-flash'\n | 'gemini-1.5-flash-latest'\n | 'gemini-1.5-flash-001'\n | 'gemini-1.5-flash-002'\n | 'gemini-1.5-flash-8b'\n | 'gemini-1.5-flash-8b-latest'\n | 'gemini-1.5-flash-8b-001'\n | 'gemini-1.5-pro'\n | 'gemini-1.5-pro-latest'\n | 'gemini-1.5-pro-001'\n | 'gemini-1.5-pro-002'\n | 'gemini-2.0-flash'\n | 'gemini-2.0-flash-001'\n | 'gemini-2.0-flash-live-001'\n | 'gemini-2.0-flash-lite'\n | 'gemini-2.0-pro-exp-02-05'\n | 'gemini-2.0-flash-thinking-exp-01-21'\n | 'gemini-2.0-flash-exp'\n | 'gemini-2.5-pro'\n | 'gemini-2.5-flash'\n | 'gemini-2.5-flash-image-preview'\n | 'gemini-2.5-flash-lite'\n | 'gemini-2.5-flash-lite-preview-09-2025'\n | 'gemini-2.5-flash-preview-04-17'\n | 'gemini-2.5-flash-preview-09-2025'\n | 'gemini-3-pro-preview'\n | 'gemini-3-pro-image-preview'\n | 'gemini-3-flash-preview'\n // latest version\n // https://ai.google.dev/gemini-api/docs/models#latest\n | 'gemini-pro-latest'\n | 'gemini-flash-latest'\n | 'gemini-flash-lite-latest'\n // Experimental models\n // https://ai.google.dev/gemini-api/docs/models/experimental-models\n | 'gemini-2.5-pro-exp-03-25'\n | 'gemini-exp-1206'\n | 'gemma-3-12b-it'\n | 'gemma-3-27b-it'\n | (string & {});\n\nexport const googleGenerativeAIProviderOptions = lazySchema(() =>\n zodSchema(\n z.object({\n responseModalities: z.array(z.enum(['TEXT', 'IMAGE'])).optional(),\n\n thinkingConfig: z\n .object({\n thinkingBudget: z.number().optional(),\n includeThoughts: z.boolean().optional(),\n // https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#thinking_level\n thinkingLevel: z\n .enum(['minimal', 'low', 'medium', 'high'])\n .optional(),\n })\n .optional(),\n\n /**\n * Optional.\n * The name of the cached content used as context to serve the prediction.\n * Format: cac