@ai-sdk/google
Version:
The **[Google Generative AI provider](https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the [Google Generative AI](https://ai.google/discover/generativeai/) APIs.
Source Map (JSON)

The map covers twelve TypeScript files, embedded verbatim in its `sources` and `sourcesContent` fields:
{"version":3,"sources":["../../src/google-generative-ai-language-model.ts","../../src/convert-json-schema-to-openapi-schema.ts","../../src/convert-to-google-generative-ai-messages.ts","../../src/get-model-path.ts","../../src/google-error.ts","../../src/google-generative-ai-options.ts","../../src/google-prepare-tools.ts","../../src/map-google-generative-ai-finish-reason.ts","../../src/tool/google-search.ts","../../src/tool/url-context.ts","../../src/tool/code-execution.ts","../../src/google-tools.ts"],"sourcesContent":["import {\n LanguageModelV2,\n LanguageModelV2CallWarning,\n LanguageModelV2Content,\n LanguageModelV2FinishReason,\n LanguageModelV2Source,\n LanguageModelV2StreamPart,\n LanguageModelV2Usage,\n SharedV2ProviderMetadata,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n ParseResult,\n Resolvable,\n combineHeaders,\n createEventSourceResponseHandler,\n createJsonResponseHandler,\n generateId,\n parseProviderOptions,\n postJsonToApi,\n resolve,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { convertJSONSchemaToOpenAPISchema } from './convert-json-schema-to-openapi-schema';\nimport { convertToGoogleGenerativeAIMessages } from './convert-to-google-generative-ai-messages';\nimport { getModelPath } from './get-model-path';\nimport { googleFailedResponseHandler } from './google-error';\nimport { GoogleGenerativeAIContentPart } from './google-generative-ai-prompt';\nimport {\n GoogleGenerativeAIModelId,\n googleGenerativeAIProviderOptions,\n} from './google-generative-ai-options';\nimport { prepareTools } from './google-prepare-tools';\nimport { mapGoogleGenerativeAIFinishReason } from './map-google-generative-ai-finish-reason';\nimport {\n groundingChunkSchema,\n groundingMetadataSchema,\n} from './tool/google-search';\nimport { urlContextMetadataSchema } from './tool/url-context';\n\ntype GoogleGenerativeAIConfig = {\n provider: string;\n baseURL: string;\n headers: Resolvable<Record<string, string | undefined>>;\n fetch?: FetchFunction;\n generateId: () => string;\n\n /**\n * The supported URLs for the model.\n */\n supportedUrls?: () => LanguageModelV2['supportedUrls'];\n};\n\nexport class GoogleGenerativeAILanguageModel implements LanguageModelV2 {\n readonly specificationVersion = 'v2';\n\n readonly modelId: GoogleGenerativeAIModelId;\n\n private readonly config: GoogleGenerativeAIConfig;\n private readonly generateId: () => string;\n\n constructor(\n modelId: GoogleGenerativeAIModelId,\n config: GoogleGenerativeAIConfig,\n ) {\n this.modelId = modelId;\n this.config = config;\n this.generateId = config.generateId ?? generateId;\n }\n\n get provider(): string {\n return this.config.provider;\n }\n\n get supportedUrls() {\n return this.config.supportedUrls?.() ?? 
Request construction: `getArgs` parses the `google` provider options, converts the prompt, prepares tools, and maps the standardized call settings onto Gemini's `generationConfig`:

```ts
  // inside class GoogleGenerativeAILanguageModel
  private async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences,
    responseFormat,
    seed,
    tools,
    toolChoice,
    providerOptions,
  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
    const warnings: LanguageModelV2CallWarning[] = [];

    const googleOptions = await parseProviderOptions({
      provider: 'google',
      providerOptions,
      schema: googleGenerativeAIProviderOptions,
    });

    // Add warning if includeThoughts is used with a non-Vertex Google provider
    if (
      googleOptions?.thinkingConfig?.includeThoughts === true &&
      !this.config.provider.startsWith('google.vertex.')
    ) {
      warnings.push({
        type: 'other',
        message:
          "The 'includeThoughts' option is only supported with the Google Vertex provider " +
          'and might not be supported or could behave unexpectedly with the current Google provider ' +
          `(${this.config.provider}).`,
      });
    }

    const isGemmaModel = this.modelId.toLowerCase().startsWith('gemma-');

    const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
      prompt,
      { isGemmaModel },
    );

    const {
      tools: googleTools,
      toolConfig: googleToolConfig,
      toolWarnings,
    } = prepareTools({
      tools,
      toolChoice,
      modelId: this.modelId,
    });

    return {
      args: {
        generationConfig: {
          // standardized settings:
          maxOutputTokens,
          temperature,
          topK,
          topP,
          frequencyPenalty,
          presencePenalty,
          stopSequences,
          seed,

          // response format:
          responseMimeType:
            responseFormat?.type === 'json' ? 'application/json' : undefined,
          responseSchema:
            responseFormat?.type === 'json' &&
            responseFormat.schema != null &&
            // Google GenAI does not support all OpenAPI Schema features,
            // so this is needed as an escape hatch:
            // TODO convert into provider option
            (googleOptions?.structuredOutputs ?? true)
              ? convertJSONSchemaToOpenAPISchema(responseFormat.schema)
              : undefined,
          ...(googleOptions?.audioTimestamp && {
            audioTimestamp: googleOptions.audioTimestamp,
          }),

          // provider options:
          responseModalities: googleOptions?.responseModalities,
          thinkingConfig: googleOptions?.thinkingConfig,
        },
        contents,
        systemInstruction: isGemmaModel ? undefined : systemInstruction,
        safetySettings: googleOptions?.safetySettings,
        tools: googleTools,
        toolConfig: googleToolConfig,
        cachedContent: googleOptions?.cachedContent,
      },
      warnings: [...warnings, ...toolWarnings],
    };
  }
```
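A hedged sketch of the `structuredOutputs` escape hatch in action: with the AI SDK's `generateObject`, the `responseFormat` becomes `'json'`, and `getArgs` would normally attach a converted `responseSchema`; disabling `structuredOutputs` skips that conversion for schemas the OpenAPI subset cannot express. Assumes the standard `ai` package surface:

```ts
import { generateObject } from 'ai';
import { google } from '@ai-sdk/google';
import { z } from 'zod';

const { object } = await generateObject({
  model: google('gemini-2.5-flash'),
  schema: z.object({ city: z.string(), population: z.number() }),
  prompt: 'What is the largest city in France?',
  providerOptions: {
    // Escape hatch: only responseMimeType is sent, no responseSchema.
    google: { structuredOutputs: false },
  },
});
```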
`doGenerate` posts to `:generateContent` and maps the ordered response parts (executable code, code execution results, thought/text, function calls, inline data) into AI SDK content, appending grounding sources at the end:

```ts
  // inside class GoogleGenerativeAILanguageModel
  async doGenerate(
    options: Parameters<LanguageModelV2['doGenerate']>[0],
  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
    const { args, warnings } = await this.getArgs(options);
    const body = JSON.stringify(args);

    const mergedHeaders = combineHeaders(
      await resolve(this.config.headers),
      options.headers,
    );

    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse,
    } = await postJsonToApi({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId,
      )}:generateContent`,
      headers: mergedHeaders,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(responseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    const candidate = response.candidates[0];
    const content: Array<LanguageModelV2Content> = [];

    // map ordered parts to content:
    const parts = candidate.content?.parts ?? [];

    const usageMetadata = response.usageMetadata;

    // Associates a code execution result with its preceding call.
    let lastCodeExecutionToolCallId: string | undefined;

    // Build content array from all parts
    for (const part of parts) {
      if ('executableCode' in part && part.executableCode?.code) {
        const toolCallId = this.config.generateId();
        lastCodeExecutionToolCallId = toolCallId;

        content.push({
          type: 'tool-call',
          toolCallId,
          toolName: 'code_execution',
          input: JSON.stringify(part.executableCode),
          providerExecuted: true,
        });
      } else if ('codeExecutionResult' in part && part.codeExecutionResult) {
        content.push({
          type: 'tool-result',
          // Assumes a result directly follows its corresponding call part.
          toolCallId: lastCodeExecutionToolCallId!,
          toolName: 'code_execution',
          result: {
            outcome: part.codeExecutionResult.outcome,
            output: part.codeExecutionResult.output,
          },
          providerExecuted: true,
        });
        // Clear the ID after use to avoid accidental reuse.
        lastCodeExecutionToolCallId = undefined;
      } else if ('text' in part && part.text != null && part.text.length > 0) {
        if (part.thought === true) {
          content.push({ type: 'reasoning', text: part.text });
        } else {
          content.push({ type: 'text', text: part.text });
        }
      } else if ('functionCall' in part) {
        content.push({
          type: 'tool-call' as const,
          toolCallId: this.config.generateId(),
          toolName: part.functionCall.name,
          input: JSON.stringify(part.functionCall.args),
        });
      } else if ('inlineData' in part) {
        content.push({
          type: 'file' as const,
          data: part.inlineData.data,
          mediaType: part.inlineData.mimeType,
        });
      }
    }

    const sources =
      extractSources({
        groundingMetadata: candidate.groundingMetadata,
        generateId: this.config.generateId,
      }) ?? [];
    for (const source of sources) {
      content.push(source);
    }

    return {
      content,
      finishReason: mapGoogleGenerativeAIFinishReason({
        finishReason: candidate.finishReason,
        hasToolCalls: content.some(part => part.type === 'tool-call'),
      }),
      usage: {
        inputTokens: usageMetadata?.promptTokenCount ?? undefined,
        outputTokens: usageMetadata?.candidatesTokenCount ?? undefined,
        totalTokens: usageMetadata?.totalTokenCount ?? undefined,
        reasoningTokens: usageMetadata?.thoughtsTokenCount ?? undefined,
        cachedInputTokens: usageMetadata?.cachedContentTokenCount ?? undefined,
      },
      warnings,
      providerMetadata: {
        google: {
          groundingMetadata: candidate.groundingMetadata ?? null,
          urlContextMetadata: candidate.urlContextMetadata ?? null,
          safetyRatings: candidate.safetyRatings ?? null,
          usageMetadata: usageMetadata ?? null,
        },
      },
      request: { body },
      response: {
        // TODO timestamp, model id, id
        headers: responseHeaders,
        body: rawResponse,
      },
    };
  }
```
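The `providerMetadata` built above surfaces on generation results. A minimal sketch, assuming the AI SDK's `generateText`; the field shapes mirror `doGenerate` (absent values come back as `null`):

```ts
import { generateText } from 'ai';
import { google } from '@ai-sdk/google';

const { text, usage, providerMetadata } = await generateText({
  model: google('gemini-2.0-flash'),
  prompt: 'Who won the most recent UEFA European Championship final?',
});

// Grounding, URL context, and safety data land under the 'google' key.
const metadata = providerMetadata?.google;
console.log(metadata?.groundingMetadata, metadata?.safetyRatings, usage);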
`doStream` posts to `:streamGenerateContent?alt=sse` and transforms parsed SSE chunks into stream parts, grouping consecutive text and reasoning deltas into blocks and de-duplicating grounding sources; module-level helpers extract tool calls, inline data, and sources:

```ts
  // inside class GoogleGenerativeAILanguageModel
  async doStream(
    options: Parameters<LanguageModelV2['doStream']>[0],
  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
    const { args, warnings } = await this.getArgs(options);

    const body = JSON.stringify(args);
    const headers = combineHeaders(
      await resolve(this.config.headers),
      options.headers,
    );

    const { responseHeaders, value: response } = await postJsonToApi({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId,
      )}:streamGenerateContent?alt=sse`,
      headers,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    let finishReason: LanguageModelV2FinishReason = 'unknown';
    const usage: LanguageModelV2Usage = {
      inputTokens: undefined,
      outputTokens: undefined,
      totalTokens: undefined,
    };
    let providerMetadata: SharedV2ProviderMetadata | undefined = undefined;

    const generateId = this.config.generateId;
    let hasToolCalls = false;

    // Track active blocks to group consecutive parts of same type
    let currentTextBlockId: string | null = null;
    let currentReasoningBlockId: string | null = null;
    let blockCounter = 0;

    // Track emitted sources to prevent duplicates
    const emittedSourceUrls = new Set<string>();
    // Associates a code execution result with its preceding call.
    let lastCodeExecutionToolCallId: string | undefined;

    return {
      stream: response.pipeThrough(
        new TransformStream<
          ParseResult<z.infer<typeof chunkSchema>>,
          LanguageModelV2StreamPart
        >({
          start(controller) {
            controller.enqueue({ type: 'stream-start', warnings });
          },

          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
            }

            if (!chunk.success) {
              controller.enqueue({ type: 'error', error: chunk.error });
              return;
            }

            const value = chunk.value;

            const usageMetadata = value.usageMetadata;

            if (usageMetadata != null) {
              usage.inputTokens = usageMetadata.promptTokenCount ?? undefined;
              usage.outputTokens =
                usageMetadata.candidatesTokenCount ?? undefined;
              usage.totalTokens = usageMetadata.totalTokenCount ?? undefined;
              usage.reasoningTokens =
                usageMetadata.thoughtsTokenCount ?? undefined;
              usage.cachedInputTokens =
                usageMetadata.cachedContentTokenCount ?? undefined;
            }

            const candidate = value.candidates?.[0];

            // sometimes the API returns an empty candidates array
            if (candidate == null) {
              return;
            }

            const content = candidate.content;

            const sources = extractSources({
              groundingMetadata: candidate.groundingMetadata,
              generateId,
            });
            if (sources != null) {
              for (const source of sources) {
                if (
                  source.sourceType === 'url' &&
                  !emittedSourceUrls.has(source.url)
                ) {
                  emittedSourceUrls.add(source.url);
                  controller.enqueue(source);
                }
              }
            }

            // Process tool call parts before determining finishReason to ensure hasToolCalls is properly set
            if (content != null) {
              // Process text parts individually to handle reasoning parts
              const parts = content.parts ?? [];
              for (const part of parts) {
                if ('executableCode' in part && part.executableCode?.code) {
                  const toolCallId = generateId();
                  lastCodeExecutionToolCallId = toolCallId;

                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId,
                    toolName: 'code_execution',
                    input: JSON.stringify(part.executableCode),
                    providerExecuted: true,
                  });

                  hasToolCalls = true;
                } else if (
                  'codeExecutionResult' in part &&
                  part.codeExecutionResult
                ) {
                  // Assumes a result directly follows its corresponding call part.
                  const toolCallId = lastCodeExecutionToolCallId;

                  if (toolCallId) {
                    controller.enqueue({
                      type: 'tool-result',
                      toolCallId,
                      toolName: 'code_execution',
                      result: {
                        outcome: part.codeExecutionResult.outcome,
                        output: part.codeExecutionResult.output,
                      },
                      providerExecuted: true,
                    });
                    // Clear the ID after use.
                    lastCodeExecutionToolCallId = undefined;
                  }
                } else if (
                  'text' in part &&
                  part.text != null &&
                  part.text.length > 0
                ) {
                  if (part.thought === true) {
                    // End any active text block before starting reasoning
                    if (currentTextBlockId !== null) {
                      controller.enqueue({
                        type: 'text-end',
                        id: currentTextBlockId,
                      });
                      currentTextBlockId = null;
                    }

                    // Start new reasoning block if not already active
                    if (currentReasoningBlockId === null) {
                      currentReasoningBlockId = String(blockCounter++);
                      controller.enqueue({
                        type: 'reasoning-start',
                        id: currentReasoningBlockId,
                      });
                    }

                    controller.enqueue({
                      type: 'reasoning-delta',
                      id: currentReasoningBlockId,
                      delta: part.text,
                    });
                  } else {
                    // End any active reasoning block before starting text
                    if (currentReasoningBlockId !== null) {
                      controller.enqueue({
                        type: 'reasoning-end',
                        id: currentReasoningBlockId,
                      });
                      currentReasoningBlockId = null;
                    }

                    // Start new text block if not already active
                    if (currentTextBlockId === null) {
                      currentTextBlockId = String(blockCounter++);
                      controller.enqueue({
                        type: 'text-start',
                        id: currentTextBlockId,
                      });
                    }

                    controller.enqueue({
                      type: 'text-delta',
                      id: currentTextBlockId,
                      delta: part.text,
                    });
                  }
                }
              }

              const inlineDataParts = getInlineDataParts(content.parts);
              if (inlineDataParts != null) {
                for (const part of inlineDataParts) {
                  controller.enqueue({
                    type: 'file',
                    mediaType: part.inlineData.mimeType,
                    data: part.inlineData.data,
                  });
                }
              }

              const toolCallDeltas = getToolCallsFromParts({
                parts: content.parts,
                generateId,
              });

              if (toolCallDeltas != null) {
                for (const toolCall of toolCallDeltas) {
                  controller.enqueue({
                    type: 'tool-input-start',
                    id: toolCall.toolCallId,
                    toolName: toolCall.toolName,
                  });

                  controller.enqueue({
                    type: 'tool-input-delta',
                    id: toolCall.toolCallId,
                    delta: toolCall.args,
                  });

                  controller.enqueue({
                    type: 'tool-input-end',
                    id: toolCall.toolCallId,
                  });

                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId: toolCall.toolCallId,
                    toolName: toolCall.toolName,
                    input: toolCall.args,
                  });

                  hasToolCalls = true;
                }
              }
            }

            if (candidate.finishReason != null) {
              finishReason = mapGoogleGenerativeAIFinishReason({
                finishReason: candidate.finishReason,
                hasToolCalls,
              });

              providerMetadata = {
                google: {
                  groundingMetadata: candidate.groundingMetadata ?? null,
                  urlContextMetadata: candidate.urlContextMetadata ?? null,
                  safetyRatings: candidate.safetyRatings ?? null,
                },
              };
              if (usageMetadata != null) {
                providerMetadata.google.usageMetadata = usageMetadata;
              }
            }
          },

          flush(controller) {
            // Close any open blocks before finishing
            if (currentTextBlockId !== null) {
              controller.enqueue({
                type: 'text-end',
                id: currentTextBlockId,
              });
            }
            if (currentReasoningBlockId !== null) {
              controller.enqueue({
                type: 'reasoning-end',
                id: currentReasoningBlockId,
              });
            }

            controller.enqueue({
              type: 'finish',
              finishReason,
              usage,
              providerMetadata,
            });
          },
        }),
      ),
      response: { headers: responseHeaders },
      request: { body },
    };
  }
}

function getToolCallsFromParts({
  parts,
  generateId,
}: {
  parts: z.infer<typeof contentSchema>['parts'];
  generateId: () => string;
}) {
  const functionCallParts = parts?.filter(
    part => 'functionCall' in part,
  ) as Array<
    GoogleGenerativeAIContentPart & {
      functionCall: { name: string; args: unknown };
    }
  >;

  return functionCallParts == null || functionCallParts.length === 0
    ? undefined
    : functionCallParts.map(part => ({
        type: 'tool-call' as const,
        toolCallId: generateId(),
        toolName: part.functionCall.name,
        args: JSON.stringify(part.functionCall.args),
      }));
}

function getTextFromParts(parts: z.infer<typeof contentSchema>['parts']) {
  const textParts = parts?.filter(part => 'text' in part) as Array<
    GoogleGenerativeAIContentPart & { text: string }
  >;

  return textParts == null || textParts.length === 0
    ? undefined
    : textParts.map(part => part.text).join('');
}

function getInlineDataParts(parts: z.infer<typeof contentSchema>['parts']) {
  return parts?.filter(
    (
      part,
    ): part is {
      inlineData: { mimeType: string; data: string };
    } => 'inlineData' in part,
  );
}

function extractSources({
  groundingMetadata,
  generateId,
}: {
  groundingMetadata: z.infer<typeof groundingMetadataSchema> | undefined | null;
  generateId: () => string;
}): undefined | LanguageModelV2Source[] {
  return groundingMetadata?.groundingChunks
    ?.filter(
      (
        chunk,
      ): chunk is z.infer<typeof groundingChunkSchema> & {
        web: { uri: string; title?: string };
      } => chunk.web != null,
    )
    .map(chunk => ({
      type: 'source',
      sourceType: 'url',
      id: generateId(),
      url: chunk.web.uri,
      title: chunk.web.title,
    }));
}
```
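A minimal sketch of consuming these stream parts by driving `doStream` directly with a hand-built `LanguageModelV2Prompt`; real applications would go through the AI SDK's `streamText` instead. Assumes a runtime where `ReadableStream` is async-iterable (Node 18+):

```ts
const { stream } = await model.doStream({
  prompt: [
    {
      role: 'user',
      content: [{ type: 'text', text: 'Think it through, then answer: 17 * 23?' }],
    },
  ],
  includeRawChunks: false,
});

for await (const part of stream) {
  switch (part.type) {
    case 'reasoning-delta':
      process.stdout.write(`[thought] ${part.delta}`); // thought parts
      break;
    case 'text-delta':
      process.stdout.write(part.delta); // grouped into text blocks
      break;
    case 'source':
      console.log('\nsource:', part.url); // de-duplicated grounding sources
      break;
    case 'finish':
      console.log('\nfinish:', part.finishReason, part.usage);
      break;
  }
}
```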
Zod schemas used to validate responses and SSE chunks:

```ts
const contentSchema = z.object({
  parts: z
    .array(
      z.union([
        // note: order matters since text can be fully empty
        z.object({
          functionCall: z.object({
            name: z.string(),
            args: z.unknown(),
          }),
        }),
        z.object({
          inlineData: z.object({
            mimeType: z.string(),
            data: z.string(),
          }),
        }),
        z.object({
          executableCode: z
            .object({
              language: z.string(),
              code: z.string(),
            })
            .nullish(),
          codeExecutionResult: z
            .object({
              outcome: z.string(),
              output: z.string(),
            })
            .nullish(),
          text: z.string().nullish(),
          thought: z.boolean().nullish(),
        }),
      ]),
    )
    .nullish(),
});

// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters
export const safetyRatingSchema = z.object({
  category: z.string().nullish(),
  probability: z.string().nullish(),
  probabilityScore: z.number().nullish(),
  severity: z.string().nullish(),
  severityScore: z.number().nullish(),
  blocked: z.boolean().nullish(),
});

const usageSchema = z.object({
  cachedContentTokenCount: z.number().nullish(),
  thoughtsTokenCount: z.number().nullish(),
  promptTokenCount: z.number().nullish(),
  candidatesTokenCount: z.number().nullish(),
  totalTokenCount: z.number().nullish(),
});

const responseSchema = z.object({
  candidates: z.array(
    z.object({
      content: contentSchema.nullish().or(z.object({}).strict()),
      finishReason: z.string().nullish(),
      safetyRatings: z.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish(),
      urlContextMetadata: urlContextMetadataSchema.nullish(),
    }),
  ),
  usageMetadata: usageSchema.nullish(),
});

// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const chunkSchema = z.object({
  candidates: z
    .array(
      z.object({
        content: contentSchema.nullish(),
        finishReason: z.string().nullish(),
        safetyRatings: z.array(safetyRatingSchema).nullish(),
        groundingMetadata: groundingMetadataSchema.nullish(),
        urlContextMetadata: urlContextMetadataSchema.nullish(),
      }),
    )
    .nullish(),
  usageMetadata: usageSchema.nullish(),
});
```
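An illustrative wire shape that `responseSchema` accepts (a hand-written sample, not an actual API reply); every field the schemas mark `nullish()` may be missing or `null`:

```ts
const sample = {
  candidates: [
    {
      // Matches the third member of the contentSchema parts union.
      content: { parts: [{ text: 'Paris', thought: null }] },
      finishReason: 'STOP',
      safetyRatings: [
        { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
      ],
    },
  ],
  usageMetadata: { promptTokenCount: 8, candidatesTokenCount: 3, totalTokenCount: 11 },
};

responseSchema.parse(sample); // passes
```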
**`src/convert-json-schema-to-openapi-schema.ts`** converts JSON Schema 7 into the OpenAPI 3.0 subset that Gemini accepts:

```ts
import { JSONSchema7Definition } from '@ai-sdk/provider';

/**
 * Converts JSON Schema 7 to OpenAPI Schema 3.0
 */
export function convertJSONSchemaToOpenAPISchema(
  jsonSchema: JSONSchema7Definition | undefined,
): unknown {
  // parameters need to be undefined if they are empty objects:
  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
    return undefined;
  }

  if (typeof jsonSchema === 'boolean') {
    return { type: 'boolean', properties: {} };
  }

  const {
    type,
    description,
    required,
    properties,
    items,
    allOf,
    anyOf,
    oneOf,
    format,
    const: constValue,
    minLength,
    enum: enumValues,
  } = jsonSchema;

  const result: Record<string, unknown> = {};

  if (description) result.description = description;
  if (required) result.required = required;
  if (format) result.format = format;

  if (constValue !== undefined) {
    result.enum = [constValue];
  }

  // Handle type
  if (type) {
    if (Array.isArray(type)) {
      if (type.includes('null')) {
        result.type = type.filter(t => t !== 'null')[0];
        result.nullable = true;
      } else {
        result.type = type;
      }
    } else if (type === 'null') {
      result.type = 'null';
    } else {
      result.type = type;
    }
  }

  // Handle enum
  if (enumValues !== undefined) {
    result.enum = enumValues;
  }

  if (properties != null) {
    result.properties = Object.entries(properties).reduce(
      (acc, [key, value]) => {
        acc[key] = convertJSONSchemaToOpenAPISchema(value);
        return acc;
      },
      {} as Record<string, unknown>,
    );
  }

  if (items) {
    result.items = Array.isArray(items)
      ? items.map(convertJSONSchemaToOpenAPISchema)
      : convertJSONSchemaToOpenAPISchema(items);
  }

  if (allOf) {
    result.allOf = allOf.map(convertJSONSchemaToOpenAPISchema);
  }
  if (anyOf) {
    // Handle cases where anyOf includes a null type
    if (
      anyOf.some(
        schema => typeof schema === 'object' && schema?.type === 'null',
      )
    ) {
      const nonNullSchemas = anyOf.filter(
        schema => !(typeof schema === 'object' && schema?.type === 'null'),
      );

      if (nonNullSchemas.length === 1) {
        // If there's only one non-null schema, convert it and make it nullable
        const converted = convertJSONSchemaToOpenAPISchema(nonNullSchemas[0]);
        if (typeof converted === 'object') {
          result.nullable = true;
          Object.assign(result, converted);
        }
      } else {
        // If there are multiple non-null schemas, keep them in anyOf
        result.anyOf = nonNullSchemas.map(convertJSONSchemaToOpenAPISchema);
        result.nullable = true;
      }
    } else {
      result.anyOf = anyOf.map(convertJSONSchemaToOpenAPISchema);
    }
  }
  if (oneOf) {
    result.oneOf = oneOf.map(convertJSONSchemaToOpenAPISchema);
  }

  if (minLength !== undefined) {
    result.minLength = minLength;
  }

  return result;
}

function isEmptyObjectSchema(jsonSchema: JSONSchema7Definition): boolean {
  return (
    jsonSchema != null &&
    typeof jsonSchema === 'object' &&
    jsonSchema.type === 'object' &&
    (jsonSchema.properties == null ||
      Object.keys(jsonSchema.properties).length === 0) &&
    !jsonSchema.additionalProperties
  );
}
```
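A worked example grounded in the code above: an `anyOf` carrying a `null` member collapses into a single `nullable` schema (OpenAPI 3.0 has no native null type):

```ts
convertJSONSchemaToOpenAPISchema({
  type: 'object',
  required: ['name'],
  properties: {
    name: { type: 'string', minLength: 1 },
    age: { anyOf: [{ type: 'number' }, { type: 'null' }] },
  },
});
// → {
//     type: 'object',
//     required: ['name'],
//     properties: {
//       name: { type: 'string', minLength: 1 },
//       age: { nullable: true, type: 'number' },
//     },
//   }
```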
**`src/convert-to-google-generative-ai-messages.ts`** maps an AI SDK prompt onto Gemini's `systemInstruction`/`contents` structure, with special handling for Gemma models (which have no system role):

```ts
import {
  LanguageModelV2Prompt,
  UnsupportedFunctionalityError,
} from '@ai-sdk/provider';
import {
  GoogleGenerativeAIContent,
  GoogleGenerativeAIContentPart,
  GoogleGenerativeAIPrompt,
} from './google-generative-ai-prompt';
import { convertToBase64 } from '@ai-sdk/provider-utils';

export function convertToGoogleGenerativeAIMessages(
  prompt: LanguageModelV2Prompt,
  options?: { isGemmaModel?: boolean },
): GoogleGenerativeAIPrompt {
  const systemInstructionParts: Array<{ text: string }> = [];
  const contents: Array<GoogleGenerativeAIContent> = [];
  let systemMessagesAllowed = true;
  const isGemmaModel = options?.isGemmaModel ?? false;

  for (const { role, content } of prompt) {
    switch (role) {
      case 'system': {
        if (!systemMessagesAllowed) {
          throw new UnsupportedFunctionalityError({
            functionality:
              'system messages are only supported at the beginning of the conversation',
          });
        }

        systemInstructionParts.push({ text: content });
        break;
      }

      case 'user': {
        systemMessagesAllowed = false;

        const parts: GoogleGenerativeAIContentPart[] = [];

        for (const part of content) {
          switch (part.type) {
            case 'text': {
              parts.push({ text: part.text });
              break;
            }

            case 'file': {
              // default to image/jpeg for unknown image/* types
              const mediaType =
                part.mediaType === 'image/*' ? 'image/jpeg' : part.mediaType;

              parts.push(
                part.data instanceof URL
                  ? {
                      fileData: {
                        mimeType: mediaType,
                        fileUri: part.data.toString(),
                      },
                    }
                  : {
                      inlineData: {
                        mimeType: mediaType,
                        data: convertToBase64(part.data),
                      },
                    },
              );

              break;
            }
          }
        }

        contents.push({ role: 'user', parts });
        break;
      }

      case 'assistant': {
        systemMessagesAllowed = false;

        contents.push({
          role: 'model',
          parts: content
            .map(part => {
              switch (part.type) {
                case 'text': {
                  return part.text.length === 0
                    ? undefined
                    : { text: part.text };
                }

                case 'file': {
                  if (part.mediaType !== 'image/png') {
                    throw new UnsupportedFunctionalityError({
                      functionality:
                        'Only PNG images are supported in assistant messages',
                    });
                  }

                  if (part.data instanceof URL) {
                    throw new UnsupportedFunctionalityError({
                      functionality:
                        'File data URLs in assistant messages are not supported',
                    });
                  }

                  return {
                    inlineData: {
                      mimeType: part.mediaType,
                      data: convertToBase64(part.data),
                    },
                  };
                }

                case 'tool-call': {
                  return {
                    functionCall: {
                      name: part.toolName,
                      args: part.input,
                    },
                  };
                }
              }
            })
            .filter(part => part !== undefined),
        });
        break;
      }

      case 'tool': {
        systemMessagesAllowed = false;

        contents.push({
          role: 'user',
          parts: content.map(part => ({
            functionResponse: {
              name: part.toolName,
              response: {
                name: part.toolName,
                content: part.output.value,
              },
            },
          })),
        });
        break;
      }
    }
  }

  if (
    isGemmaModel &&
    systemInstructionParts.length > 0 &&
    contents.length > 0 &&
    contents[0].role === 'user'
  ) {
    const systemText = systemInstructionParts
      .map(part => part.text)
      .join('\n\n');

    contents[0].parts.unshift({ text: systemText + '\n\n' });
  }

  return {
    systemInstruction:
      systemInstructionParts.length > 0 && !isGemmaModel
        ? { parts: systemInstructionParts }
        : undefined,
    contents,
  };
}
```

**`src/get-model-path.ts`** normalizes bare model IDs into API paths:

```ts
export function getModelPath(modelId: string): string {
  return modelId.includes('/') ? modelId : `models/${modelId}`;
}
```

**`src/google-error.ts`** parses Google's error envelope:

```ts
import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

const googleErrorDataSchema = z.object({
  error: z.object({
    code: z.number().nullable(),
    message: z.string(),
    status: z.string(),
  }),
});

export type GoogleErrorData = z.infer<typeof googleErrorDataSchema>;

export const googleFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: googleErrorDataSchema,
  errorToMessage: data => data.error.message,
});
```
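A worked example grounded in the conversion code above (system messages become `systemInstruction` parts, except for Gemma models where the text is prepended to the first user message):

```ts
const { systemInstruction, contents } = convertToGoogleGenerativeAIMessages([
  { role: 'system', content: 'Answer tersely.' },
  { role: 'user', content: [{ type: 'text', text: 'Hi' }] },
]);
// systemInstruction → { parts: [{ text: 'Answer tersely.' }] }
// contents          → [{ role: 'user', parts: [{ text: 'Hi' }] }]
// With { isGemmaModel: true }, systemInstruction is undefined and the
// system text is unshifted into contents[0].parts instead.

getModelPath('gemini-2.0-flash'); // → 'models/gemini-2.0-flash'
```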
**`src/google-generative-ai-options.ts`** declares the known model IDs and the `google` provider-options schema:

```ts
import { z } from 'zod/v4';

export type GoogleGenerativeAIModelId =
  // Stable models
  // https://ai.google.dev/gemini-api/docs/models/gemini
  | 'gemini-1.5-flash'
  | 'gemini-1.5-flash-latest'
  | 'gemini-1.5-flash-001'
  | 'gemini-1.5-flash-002'
  | 'gemini-1.5-flash-8b'
  | 'gemini-1.5-flash-8b-latest'
  | 'gemini-1.5-flash-8b-001'
  | 'gemini-1.5-pro'
  | 'gemini-1.5-pro-latest'
  | 'gemini-1.5-pro-001'
  | 'gemini-1.5-pro-002'
  | 'gemini-2.0-flash'
  | 'gemini-2.0-flash-001'
  | 'gemini-2.0-flash-live-001'
  | 'gemini-2.0-flash-lite'
  | 'gemini-2.0-pro-exp-02-05'
  | 'gemini-2.0-flash-thinking-exp-01-21'
  | 'gemini-2.0-flash-exp'
  | 'gemini-2.5-pro'
  | 'gemini-2.5-flash'
  | 'gemini-2.5-flash-lite'
  // Experimental models
  // https://ai.google.dev/gemini-api/docs/models/experimental-models
  | 'gemini-2.5-pro-exp-03-25'
  | 'gemini-2.5-flash-preview-04-17'
  | 'gemini-exp-1206'
  | 'gemma-3-12b-it'
  | 'gemma-3-27b-it'
  | (string & {});

export const googleGenerativeAIProviderOptions = z.object({
  responseModalities: z.array(z.enum(['TEXT', 'IMAGE'])).optional(),

  thinkingConfig: z
    .object({
      thinkingBudget: z.number().optional(),
      includeThoughts: z.boolean().optional(),
    })
    .optional(),

  /**
   * Optional. The name of the cached content used as context to serve the prediction.
   * Format: cachedContents/{cachedContent}
   */
  cachedContent: z.string().optional(),

  /**
   * Optional. Enable structured output. Default is true.
   *
   * This is useful when the JSON Schema contains elements that are
   * not supported by the OpenAPI schema version that
   * Google Generative AI uses. You can use this to disable
   * structured outputs if you need to.
   */
  structuredOutputs: z.boolean().optional(),

  /**
   * Optional. A list of unique safety settings for blocking unsafe content.
   */
  safetySettings: z
    .array(
      z.object({
        category: z.enum([
          'HARM_CATEGORY_UNSPECIFIED',
          'HARM_CATEGORY_HATE_SPEECH',
          'HARM_CATEGORY_DANGEROUS_CONTENT',
          'HARM_CATEGORY_HARASSMENT',
          'HARM_CATEGORY_SEXUALLY_EXPLICIT',
          'HARM_CATEGORY_CIVIC_INTEGRITY',
        ]),
        threshold: z.enum([
          'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
          'BLOCK_LOW_AND_ABOVE',
          'BLOCK_MEDIUM_AND_ABOVE',
          'BLOCK_ONLY_HIGH',
          'BLOCK_NONE',
          'OFF',
        ]),
      }),
    )
    .optional(),

  threshold: z
    .enum([
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
      'BLOCK_LOW_AND_ABOVE',
      'BLOCK_MEDIUM_AND_ABOVE',
      'BLOCK_ONLY_HIGH',
      'BLOCK_NONE',
      'OFF',
    ])
    .optional(),

  /**
   * Optional. Enables timestamp understanding for audio-only files.
   *
   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
   */
  audioTimestamp: z.boolean().optional(),
});

export type GoogleGenerativeAIProviderOptions = z.infer<
  typeof googleGenerativeAIProviderOptions
>;
```
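A hedged sketch of passing these options through the AI SDK (assuming `generateText` from `ai` and that the `GoogleGenerativeAIProviderOptions` type is re-exported from the package root):

```ts
import { generateText } from 'ai';
import { google } from '@ai-sdk/google';
import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google';

await generateText({
  model: google('gemini-2.5-flash'),
  prompt: 'Summarize this audio clip with timestamps.',
  providerOptions: {
    google: {
      thinkingConfig: { thinkingBudget: 1024, includeThoughts: true },
      safetySettings: [
        { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH' },
      ],
      audioTimestamp: true,
    } satisfies GoogleGenerativeAIProviderOptions,
  },
});
```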
**`src/google-prepare-tools.ts`** translates AI SDK tools and tool choice into Gemini's `tools`/`toolConfig`, warning on unsupported combinations:

```ts
import {
  LanguageModelV2CallOptions,
  LanguageModelV2CallWarning,
  UnsupportedFunctionalityError,
} from '@ai-sdk/provider';
import { convertJSONSchemaToOpenAPISchema } from './convert-json-schema-to-openapi-schema';
import { GoogleGenerativeAIModelId } from './google-generative-ai-options';

export function prepareTools({
  tools,
  toolChoice,
  modelId,
}: {
  tools: LanguageModelV2CallOptions['tools'];
  toolChoice?: LanguageModelV2CallOptions['toolChoice'];
  modelId: GoogleGenerativeAIModelId;
}): {
  tools:
    | {
        functionDeclarations: Array<{
          name: string;
          description: string;
          parameters: unknown;
        }>;
      }
    | Record<string, any>
    | undefined;
  toolConfig:
    | undefined
    | {
        functionCallingConfig: {
          mode: 'AUTO' | 'NONE' | 'ANY';
          allowedFunctionNames?: string[];
        };
      };
  toolWarnings: LanguageModelV2CallWarning[];
} {
  // when the tools array is empty, change it to undefined to prevent errors:
  tools = tools?.length ? tools : undefined;

  const toolWarnings: LanguageModelV2CallWarning[] = [];

  const isGemini2 = modelId.includes('gemini-2');
  const supportsDynamicRetrieval =
    modelId.includes('gemini-1.5-flash') && !modelId.includes('-8b');

  if (tools == null) {
    return { tools: undefined, toolConfig: undefined, toolWarnings };
  }

  // Check for mixed tool types and add warnings
  const hasFunctionTools = tools.some(tool => tool.type === 'function');
  const hasProviderDefinedTools = tools.some(
    tool => tool.type === 'provider-defined',
  );

  if (hasFunctionTools && hasProviderDefinedTools) {
    toolWarnings.push({
      type: 'unsupported-tool',
      tool: tools.find(tool => tool.type === 'function')!,
      details:
        'Cannot mix function tools with provider-defined tools in the same request. ' +
        'Please use either function tools or provider-defined tools, but not both.',
    });
  }

  if (hasProviderDefinedTools) {
    const googleTools: Record<string, any> = {};

    const providerDefinedTools = tools.filter(
      tool => tool.type === 'provider-defined',
    );
    providerDefinedTools.forEach(tool => {
      switch (tool.id) {
        case 'google.google_search':
          if (isGemini2) {
            googleTools.googleSearch = {};
          } else if (supportsDynamicRetrieval) {
            // Gemini 1.5 flash (non-8b) models support a configurable dynamic retrieval mode
            googleTools.googleSearchRetrieval = {
              dynamicRetrievalConfig: {
                mode: tool.args.mode as
                  | 'MODE_DYNAMIC'
                  | 'MODE_UNSPECIFIED'
                  | undefined,
                dynamicThreshold: tool.args.dynamicThreshold as
                  | number
                  | undefined,
              },
            };
          } else {
            // other non-Gemini-2 models fall back to basic googleSearchRetrieval
            googleTools.googleSearchRetrieval = {};
          }
          break;
        case 'google.url_context':
          if (isGemini2) {
            googleTools.urlContext = {};
          } else {
            toolWarnings.push({
              type: 'unsupported-tool',
              tool,
              details:
                'The URL context tool is not supported with Gemini models other than Gemini 2.',
            });
          }
          break;
        case 'google.code_execution':
          if (isGemini2) {
            googleTools.codeExecution = {};
          } else {
            toolWarnings.push({
              type: 'unsupported-tool',
              tool,
              details:
                'The code execution tool is not supported with Gemini models other than Gemini 2.',
            });
          }
          break;
        default:
          toolWarnings.push({ type: 'unsupported-tool', tool });
          break;
      }
    });

    return {
      tools: Object.keys(googleTools).length > 0 ? googleTools : undefined,
      toolConfig: undefined,
      toolWarnings,
    };
  }

  const functionDeclarations = [];
  for (const tool of tools) {
    switch (tool.type) {
      case 'function':
        functionDeclarations.push({
          name: tool.name,
          description: tool.description ?? '',
          parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema),
        });
        break;
      default:
        toolWarnings.push({ type: 'unsupported-tool', tool });
        break;
    }
  }

  if (toolChoice == null) {
    return {
      tools: { functionDeclarations },
      toolConfig: undefined,
      toolWarnings,
    };
  }

  const type = toolChoice.type;

  switch (type) {
    case 'auto':
      return {
        tools: { functionDeclarations },
        toolConfig: { functionCallingConfig: { mode: 'AUTO' } },
        toolWarnings,
      };
    case 'none':
      return {
        tools: { functionDeclarations },
        toolConfig: { functionCallingConfig: { mode: 'NONE' } },
        toolWarnings,
      };
    case 'required':
      return {
        tools: { functionDeclarations },
        toolConfig: { functionCallingConfig: { mode: 'ANY' } },
        toolWarnings,
      };
    case 'tool':
      return {
        tools: { functionDeclarations },
        toolConfig: {
          functionCallingConfig: {
            mode: 'ANY',
            allowedFunctionNames: [toolChoice.toolName],
          },
        },
        toolWarnings,
      };
    default: {
      const _exhaustiveCheck: never = type;
      throw new UnsupportedFunctionalityError({
        functionality: `tool choice type: ${_exhaustiveCheck}`,
      });
    }
  }
}
```
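A worked example grounded in the function above, using an illustrative `getWeather` tool (the function-tool shape follows the `LanguageModelV2` call options this file consumes):

```ts
const { tools, toolConfig, toolWarnings } = prepareTools({
  tools: [
    {
      type: 'function',
      name: 'getWeather',
      description: 'Look up the current weather for a city',
      inputSchema: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'getWeather' },
  modelId: 'gemini-2.0-flash',
});
// tools      → { functionDeclarations: [{ name: 'getWeather', … }] }
// toolConfig → { functionCallingConfig: { mode: 'ANY',
//                allowedFunctionNames: ['getWeather'] } }
```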
**`src/map-google-generative-ai-finish-reason.ts`** maps Gemini finish reasons onto the AI SDK's standardized set:

```ts
import { LanguageModelV2FinishReason } from '@ai-sdk/provider';

export function mapGoogleGenerativeAIFinishReason({
  finishReason,
  hasToolCalls,
}: {
  finishReason: string | null | undefined;
  hasToolCalls: boolean;
}): LanguageModelV2FinishReason {
  switch (finishReason) {
    case 'STOP':
      return hasToolCalls ? 'tool-calls' : 'stop';
    case 'MAX_TOKENS':
      return 'length';
    case 'IMAGE_SAFETY':
    case 'RECITATION':
    case 'SAFETY':
    case 'BLOCKLIST':
    case 'PROHIBITED_CONTENT':
    case 'SPII':
      return 'content-filter';
    case 'FINISH_REASON_UNSPECIFIED':
    case 'OTHER':
      return 'other';
    case 'MALFORMED_FUNCTION_CALL':
      return 'error';
    default:
      return 'unknown';
  }
}
```
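A few example mappings, taken directly from the switch above:

```ts
mapGoogleGenerativeAIFinishReason({ finishReason: 'STOP', hasToolCalls: true });
// → 'tool-calls'
mapGoogleGenerativeAIFinishReason({ finishReason: 'SAFETY', hasToolCalls: false });
// → 'content-filter'
mapGoogleGenerativeAIFinishReason({ finishReason: 'SOME_NEW_REASON', hasToolCalls: false });
// → 'unknown' (forward-compatible fallback)
```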
**`src/tool/google-search.ts`** defines the grounding metadata schemas and the Google Search tool factory:

```ts
import { createProviderDefinedToolFactory } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

// https://ai.google.dev/gemini-api/docs/google-search
// https://ai.google.dev/api/generate-content#GroundingSupport
// https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-search

export const groundingChunkSchema = z.object({
  web: z.object({ uri: z.string(), title: z.string() }).nullish(),
  retrievedContext: z.object({ uri: z.string(), title: z.string() }).nullish(),
});

export const groundingMetadataSchema = z.object({
  webSearchQueries: z.array(z.string()).nullish(),
  retrievalQueries: z.array(z.string()).nullish(),
  searchEntryPoint: z.object({ renderedContent: z.string() }).nullish(),
  groundingChunks: z.array(groundingChunkSchema).nullish(),
  groundingSupports: z
    .array(
      z.object({
        segment: z.object({
          startIndex: z.number().nullish(),
          endIndex: z.number().nullish(),
          text: z.string().nullish(),
        }),
        segment_text: z.string().nullish(),
        groundingChunkIndices: z.array(z.number()).nullish(),
        supportChunkIndices: z.array(z.number()).nullish(),
        confidenceScores: z.array(z.number()).nullish(),
        confidenceScore: z.array(z.number()).nullish(),
      }),
    )
    .nullish(),
  retrievalMetadata: z
    .union([
      z.object({
        webDynamicRetrievalScore: z.number(),
      }),
      z.object({}),
    ])
    .nullish(),
});

export const googleSearch = createProviderDefinedToolFactory<
  {},
  {
    /**
     * The mode of the predictor to be used in dynamic retrieval. The following modes are supported:
     * - MODE_DYNAMIC: Run retrieval only when system decides it is necessary
     * - MODE_UNSPECIFIED: Always trigger retrieval
     * @default MODE_UNSPECIFIED
     */
    mode?: 'MODE_DYNAMIC' | 'MODE_UNSPECIFIED';

    /**
     * The threshold to be used in dynamic retrieval (if not set, a system default value is used).
     */
    dynamicThreshold?: number;
  }
>({
  id: 'google.google_search',
  name: 'google_search',
  inputSchema: z.object({
    mode: z
      .enum(['MODE_DYNAMIC', 'MODE_UNSPECIFIED'])
      .default('MODE_UNSPECIFIED'),
    dynamicThreshold: z.number().default(1),
  }),
});
```

**`src/tool/url-context.ts`** defines the URL context tool and its retrieval metadata schema:

```ts
import { createProviderDefinedToolFactory } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

// https://ai.google.dev/api/generate-content#UrlRetrievalMetadata
const urlMetadataSchema = z.object({
  retrievedUrl: z.string(),
  urlRetrievalStatus: z.string(),
});

export const urlContextMetadataSchema = z.object({
  urlMetadata: z.array(urlMetadataSchema),
});

export const urlContext = createProviderDefinedToolFactory<
  {
    // URL context has no input schema; it directly uses the URLs from the prompt.
  },
  {}
>({
  id: 'google.url_context',
  name: 'url_context',
  inputSchema: z.object({}),
});
```

**`src/tool/code-execution.ts`** defines the code execution tool:

```ts
import { createProviderDefinedToolFactoryWithOutputSchema } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

/**
 * A tool that enables the model to generate and run Python code.
 *
 * @note Ensure the selected model supports Code Execution.
 * Multi-tool usage with the code execution tool is typically compatible with Gemini >=2 models.
 *
 * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
 * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
 */
export const codeExecution = createProviderDefinedToolFactoryWithOutputSchema<
  {
    language: string;
    code: string;
  },
  {
    outcome: string;
    output: string;
  },
  {}
>({
  id: 'google.code_execution',
  name: 'code_execution',
  inputSchema: z.object({
    language: z.string().describe('The programming language of the code.'),
    code: z.string().describe('The code to be executed.'),
  }),
  outputSchema: z.object({
    outcome: z
      .string()
      .describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
    output: z.string().describe('The output from the code execution.'),
  }),
});
```
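A hedged sketch of enabling one of these provider-defined tools through the AI SDK, assuming the provider instance exposes the factories as `google.tools` and that `generateText` surfaces grounding results via `sources`; note the tool key must match the declared name (`google_search`):

```ts
import { generateText } from 'ai';
import { google } from '@ai-sdk/google';

const { text, sources } = await generateText({
  model: google('gemini-2.0-flash'),
  prompt: 'What changed in the latest Node.js LTS release?',
  tools: {
    // Key must equal the tool's declared name, "google_search".
    google_search: google.tools.googleSearch({}),
  },
});
```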
**`src/google-tools.ts`** re-exports the provider-defined tool factories:

```ts
import { codeExecution } from './tool/code-execution';
import { googleSearch } from './tool/google-search';
import { urlContext } from './tool/url-context';

export const googleTools = {
  /**
   * Creates a Google search tool that gives Google direct access to real-time web content.
   * Must have name "google_search".
   */
  googleSearch,

  /**
   * Creates a URL context tool that lets the model retrieve content from the URLs in the prompt.
   * Must have name "url_context".
   */
  urlContext,

  /**
   * A tool that enables the model to generate and run Python code.
   * Must have name "code_execution".
   *
   * @note Ensure the selected model supports Code Execution.
   * Multi-tool usage with the code execution tool is typically compatible with Gemini >=2 models.
   *
   * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
   * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
   */
  codeExecution,
};
```

The source map's remaining `mappings` field is machine-generated VLQ data (truncated in this capture).