ai

AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript
import {
  ModelMessage, Tool, InferToolInput, InferToolOutput, AssistantModelMessage,
  ToolModelMessage, ReasoningPart, Schema, SystemModelMessage, UserModelMessage,
  ProviderOptions, IdGenerator, ToolCall, InferSchema, FlexibleSchema, DataContent,
  Validator, StandardSchemaV1, Resolvable, FetchFunction,
} from '@ai-sdk/provider-utils';
export {
  AssistantContent, AssistantModelMessage, DataContent, FilePart, IdGenerator,
  ImagePart, InferToolInput, InferToolOutput, ModelMessage, Schema,
  SystemModelMessage, TextPart, Tool, ToolCallOptions, ToolCallPart, ToolContent,
  ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent,
  UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId,
  jsonSchema, tool, zodSchema,
} from '@ai-sdk/provider-utils';
import { AttributeValue, Tracer } from '@opentelemetry/api';
import {
  EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2,
  ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1,
  LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning,
  LanguageModelV2Source, LanguageModelV2Middleware, SharedV2ProviderMetadata,
  SpeechModelV2, SpeechModelV2CallWarning, TranscriptionModelV2,
  TranscriptionModelV2CallWarning, LanguageModelV2Usage, LanguageModelV2CallOptions,
  AISDKError, LanguageModelV2ToolCall, JSONSchema7, JSONParseError,
  TypeValidationError, ProviderV2, NoSuchModelError, JSONObject,
} from '@ai-sdk/provider';
export {
  AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError,
  InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError,
  NoContentGeneratedError, NoSuchModelError, TypeValidationError,
  UnsupportedFunctionalityError,
} from '@ai-sdk/provider';
import * as z3 from 'zod/v3';
import * as z4 from 'zod/v4';
import { z } from 'zod/v4';
import { ServerResponse } from 'node:http';
import { ServerResponse as ServerResponse$1 } from 'http';

type CallSettings = {
  /** Maximum number of tokens to generate. */
  maxOutputTokens?: number;
  /**
   * Temperature setting. The range depends on the provider and model.
   * It is recommended to set either `temperature` or `topP`, but not both.
   */
  temperature?: number;
  /**
   * Nucleus sampling. This is a number between 0 and 1. E.g. 0.1 would mean
   * that only tokens with the top 10% probability mass are considered.
   * It is recommended to set either `temperature` or `topP`, but not both.
   */
  topP?: number;
  /**
   * Only sample from the top K options for each subsequent token. Used to
   * remove "long tail" low-probability responses. Recommended for advanced
   * use cases only. You usually only need to use `temperature`.
   */
  topK?: number;
  /**
   * Presence penalty setting. It affects how likely the model is to repeat
   * information that is already in the prompt. The presence penalty is a
   * number between -1 (increase repetition) and 1 (maximum penalty, decrease
   * repetition). 0 means no penalty.
   */
  presencePenalty?: number;
  /**
   * Frequency penalty setting. It affects how likely the model is to
   * repeatedly use the same words or phrases. The frequency penalty is a
   * number between -1 (increase repetition) and 1 (maximum penalty, decrease
   * repetition). 0 means no penalty.
   */
  frequencyPenalty?: number;
  /**
   * Stop sequences. If set, the model will stop generating text when one of
   * the stop sequences is generated. Providers may have limits on the number
   * of stop sequences.
   */
  stopSequences?: string[];
  /**
   * The seed (integer) to use for random sampling. If set and supported by
   * the model, calls will generate deterministic results.
   */
  seed?: number;
  /**
   * Maximum number of retries. Set to 0 to disable retries.
   * @default 2
   */
  maxRetries?: number;
  /** Abort signal. */
  abortSignal?: AbortSignal;
  /**
   * Additional HTTP headers to be sent with the request.
   * Only applicable for HTTP-based providers.
   */
  headers?: Record<string, string | undefined>;
};

/**
 * Prompt part of the AI function options.
 * It contains a system message, a simple text prompt, or a list of messages.
 */
type Prompt = {
  /** System message to include in the prompt. Can be used with `prompt` or `messages`. */
  system?: string;
} & ({
  /** A prompt. It can be either a text prompt or a list of messages. You can either use `prompt` or `messages` but not both. */
  prompt: string | Array<ModelMessage>;
  /** A list of messages. You can either use `prompt` or `messages` but not both. */
  messages?: never;
} | {
  /** A list of messages. You can either use `prompt` or `messages` but not both. */
  messages: Array<ModelMessage>;
  /** A prompt. It can be either a text prompt or a list of messages. You can either use `prompt` or `messages` but not both. */
  prompt?: never;
});

/** Telemetry configuration. */
type TelemetrySettings = {
  /** Enable or disable telemetry. Disabled by default while experimental. */
  isEnabled?: boolean;
  /**
   * Enable or disable input recording. Enabled by default.
   *
   * You might want to disable input recording to avoid recording sensitive
   * information, to reduce data transfers, or to increase performance.
   */
  recordInputs?: boolean;
  /**
   * Enable or disable output recording. Enabled by default.
   *
   * You might want to disable output recording to avoid recording sensitive
   * information, to reduce data transfers, or to increase performance.
   */
  recordOutputs?: boolean;
  /** Identifier for this function. Used to group telemetry data by function. */
  functionId?: string;
  /** Additional information to include in the telemetry data. */
  metadata?: Record<string, AttributeValue>;
  /** A custom tracer to use for the telemetry data. */
  tracer?: Tracer;
};

/** Embedding model that is used by the AI SDK Core functions. */
type EmbeddingModel<VALUE = string> = string | EmbeddingModelV2<VALUE>;
/** Embedding. */
type Embedding = EmbeddingModelV2Embedding;
/** Image model that is used by the AI SDK Core functions. */
type ImageModel = ImageModelV2;
/**
 * Warning from the model provider for this call. The call will proceed, but
 * e.g. some settings might not be supported, which can lead to suboptimal
 * results.
 */
type ImageGenerationWarning = ImageModelV2CallWarning;
/** Metadata from the model provider for this call. */
type ImageModelProviderMetadata = ImageModelV2ProviderMetadata;
type ImageModelResponseMetadata = {
  /** Timestamp for the start of the generated response. */
  timestamp: Date;
  /** The ID of the response model that was used to generate the response. */
  modelId: string;
  /** Response headers. */
  headers?: Record<string, string>;
};
type JSONValue = JSONValue$1;
/** Language model that is used by the AI SDK Core functions. */
type LanguageModel = string | LanguageModelV2;
/**
 * Reason why a language model finished generating a response.
 *
 * Can be one of the following:
 * - `stop`: model generated stop sequence
 * - `length`: model generated maximum number of tokens
 * - `content-filter`: content filter violation stopped the model
 * - `tool-calls`: model triggered tool calls
 * - `error`: model stopped because of an error
 * - `other`: model stopped for other reasons
 */
type FinishReason = LanguageModelV2FinishReason;
/**
 * Warning from the model provider for this call. The call will proceed, but
 * e.g. some settings might not be supported, which can lead to suboptimal
 * results.
 */
type CallWarning = LanguageModelV2CallWarning;
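// --- Usage sketch (illustrative, not part of the declarations) ---
// Shows how `CallSettings` and the `Prompt` union combine in a call such as
// `generateText` (declared further down). The model id is a placeholder
// assumption; `prompt` and `messages` are mutually exclusive by type.
import { generateText } from 'ai';

// Text-prompt form: `prompt` is set, so `messages` must be absent.
await generateText({
  model: 'openai/gpt-4o', // placeholder model id
  system: 'You are a concise assistant.',
  prompt: 'Summarize the AI SDK in one sentence.',
  temperature: 0.3, // set either `temperature` or `topP`, not both
  maxOutputTokens: 256,
  maxRetries: 0, // disable retries
});

// Message-list form: `messages` is set, so `prompt` must be absent.
await generateText({
  model: 'openai/gpt-4o', // placeholder model id
  messages: [{ role: 'user', content: 'Hello!' }],
});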
/** A source that has been used as input to generate the response. */
type Source = LanguageModelV2Source;
/**
 * Tool choice for the generation. It supports the following settings:
 * - `auto` (default): the model can choose whether and which tools to call.
 * - `required`: the model must call a tool. It can choose which tool to call.
 * - `none`: the model must not call tools.
 * - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool.
 */
type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
  type: 'tool';
  toolName: Extract<keyof TOOLS, string>;
};
type LanguageModelMiddleware = LanguageModelV2Middleware;
type LanguageModelRequestMetadata = {
  /** Request HTTP body that was sent to the provider API. */
  body?: unknown;
};
type LanguageModelResponseMetadata = {
  /** ID for the generated response. */
  id: string;
  /** Timestamp for the start of the generated response. */
  timestamp: Date;
  /** The ID of the response model that was used to generate the response. */
  modelId: string;
  /** Response headers (available only for providers that use HTTP requests). */
  headers?: Record<string, string>;
};
/** Provider for language, text embedding, and image models. */
type Provider = {
  /**
   * Returns the language model with the given id.
   * The model id is then passed to the provider function to get the model.
   * @param {string} id - The id of the model to return.
   * @returns {LanguageModel} The language model associated with the id.
   * @throws {NoSuchModelError} If no such model exists.
   */
  languageModel(modelId: string): LanguageModel;
  /**
   * Returns the text embedding model with the given id.
   * The model id is then passed to the provider function to get the model.
   * @param {string} id - The id of the model to return.
   * @returns {EmbeddingModel<string>} The text embedding model associated with the id.
   * @throws {NoSuchModelError} If no such model exists.
   */
  textEmbeddingModel(modelId: string): EmbeddingModel<string>;
  /**
   * Returns the image model with the given id.
   * The model id is then passed to the provider function to get the model.
   * @param {string} id - The id of the model to return.
   * @returns {ImageModel} The image model associated with the id.
   */
  imageModel(modelId: string): ImageModel;
};
/**
 * Additional provider-specific metadata that is returned from the provider.
 * This is needed to enable provider-specific functionality that can be fully
 * encapsulated in the provider.
 */
type ProviderMetadata = SharedV2ProviderMetadata;
/** Speech model that is used by the AI SDK Core functions. */
type SpeechModel = SpeechModelV2;
/**
 * Warning from the model provider for this call. The call will proceed, but
 * e.g. some settings might not be supported, which can lead to suboptimal
 * results.
 */
type SpeechWarning = SpeechModelV2CallWarning;
type SpeechModelResponseMetadata = {
  /** Timestamp for the start of the generated response. */
  timestamp: Date;
  /** The ID of the response model that was used to generate the response. */
  modelId: string;
  /** Response headers. */
  headers?: Record<string, string>;
  /** Response body. */
  body?: unknown;
};
/** Transcription model that is used by the AI SDK Core functions. */
type TranscriptionModel = TranscriptionModelV2;
/**
 * Warning from the model provider for this call. The call will proceed, but
 * e.g. some settings might not be supported, which can lead to suboptimal
 * results.
 */
type TranscriptionWarning = TranscriptionModelV2CallWarning;
type TranscriptionModelResponseMetadata = {
  /** Timestamp for the start of the generated response. */
  timestamp: Date;
  /** The ID of the response model that was used to generate the response. */
  modelId: string;
  /** Response headers. */
  headers?: Record<string, string>;
};
/** Represents the number of tokens used in a prompt and completion. */
type LanguageModelUsage = LanguageModelV2Usage;
/** Represents the number of tokens used in an embedding. */
type EmbeddingModelUsage = {
  /** The number of tokens used in the embedding. */
  tokens: number;
};
/**
 * Experimental. Can change in patch versions without warning.
 *
 * Download function. Called with an array of URLs and, for each, a boolean
 * indicating whether the URL is supported by the model.
 *
 * The download function can decide for each URL:
 * - to return null (which means that the URL should be passed to the model)
 * - to download the asset and return the data (incl. retries, authentication, etc.)
 *
 * Should throw DownloadError if the download fails.
 *
 * Should return an array of objects, sorted in the order of the requested
 * downloads. For each entry, `data` should be a Uint8Array if the URL was
 * downloaded, and `mediaType` should be the media type of the downloaded
 * asset. An entry should be null if the URL should be passed through as is.
 */
type DownloadFunction = (options: Array<{
  url: URL;
  isUrlSupportedByModel: boolean;
}>) => PromiseLike<Array<{
  data: Uint8Array;
  mediaType: string | undefined;
} | null>>;
/** A generated file. */
interface GeneratedFile {
  /** File as a base64 encoded string. */
  readonly base64: string;
  /** File as a Uint8Array. */
  readonly uint8Array: Uint8Array;
  /**
   * The IANA media type of the file.
   * @see https://www.iana.org/assignments/media-types/media-types.xhtml
   */
  readonly mediaType: string;
}
/**
 * Create a union of the given object's values, and optionally specify which
 * keys to get the values from.
 *
 * Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438)
 * if you want to have this type as a built-in in TypeScript.
 *
 * @example
 * ```
 * // data.json
 * {
 *   'foo': 1,
 *   'bar': 2,
 *   'biz': 3
 * }
 *
 * // main.ts
 * import type {ValueOf} from 'type-fest';
 * import data = require('./data.json');
 *
 * export function getData(name: string): ValueOf<typeof data> {
 *   return data[name];
 * }
 *
 * export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
 *   return data[name];
 * }
 *
 * // file.ts
 * import {getData, onlyBar} from './main';
 *
 * getData('foo');
 * //=> 1
 *
 * onlyBar('foo');
 * //=> TypeError ...
 *
 * onlyBar('bar');
 * //=> 2
 * ```
 *
 * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
 */
type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta'>>;
type StaticToolCall<TOOLS extends ToolSet> = ValueOf<{
  [NAME in keyof TOOLS]: {
    type: 'tool-call';
    toolCallId: string;
    toolName: NAME & string;
    input: TOOLS[NAME] extends Tool<infer PARAMETERS> ? PARAMETERS : never;
    providerExecuted?: boolean;
    dynamic?: false | undefined;
    invalid?: false | undefined;
    error?: never;
    providerMetadata?: ProviderMetadata;
  };
}>;
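// --- Usage sketch (illustrative, not part of the declarations) ---
// A minimal `ToolSet` built with the exported `tool` helper, plus a typed
// `ToolChoice`: `{ type: 'tool', toolName: ... }` only accepts keys of the
// tool set. The weather tool and model id are assumptions for illustration.
import { generateText, tool } from 'ai';
import { z } from 'zod';

const tools = {
  weather: tool({
    description: 'Get the weather for a city',
    inputSchema: z.object({ city: z.string() }),
    execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed result
  }),
};

const result = await generateText({
  model: 'openai/gpt-4o', // placeholder model id
  tools,
  // Type-checked: 'weather' is the only valid tool name here.
  toolChoice: { type: 'tool', toolName: 'weather' },
  prompt: 'What is the weather in Berlin?',
});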
type DynamicToolCall = {
  type: 'tool-call';
  toolCallId: string;
  toolName: string;
  input: unknown;
  providerExecuted?: boolean;
  dynamic: true;
  providerMetadata?: ProviderMetadata;
  /**
   * True if this is caused by an unparsable tool call or
   * a tool that does not exist.
   */
  invalid?: boolean;
  /** The error that caused the tool call to be invalid. */
  error?: unknown;
};
type TypedToolCall<TOOLS extends ToolSet> = StaticToolCall<TOOLS> | DynamicToolCall;
type StaticToolError<TOOLS extends ToolSet> = ValueOf<{
  [NAME in keyof TOOLS]: {
    type: 'tool-error';
    toolCallId: string;
    toolName: NAME & string;
    input: InferToolInput<TOOLS[NAME]>;
    error: unknown;
    providerExecuted?: boolean;
    dynamic?: false | undefined;
  };
}>;
type DynamicToolError = {
  type: 'tool-error';
  toolCallId: string;
  toolName: string;
  input: unknown;
  error: unknown;
  providerExecuted?: boolean;
  dynamic: true;
};
type TypedToolError<TOOLS extends ToolSet> = StaticToolError<TOOLS> | DynamicToolError;
type StaticToolResult<TOOLS extends ToolSet> = ValueOf<{
  [NAME in keyof TOOLS]: {
    type: 'tool-result';
    toolCallId: string;
    toolName: NAME & string;
    input: InferToolInput<TOOLS[NAME]>;
    output: InferToolOutput<TOOLS[NAME]>;
    providerExecuted?: boolean;
    dynamic?: false | undefined;
    preliminary?: boolean;
  };
}>;
type DynamicToolResult = {
  type: 'tool-result';
  toolCallId: string;
  toolName: string;
  input: unknown;
  output: unknown;
  providerExecuted?: boolean;
  dynamic: true;
  preliminary?: boolean;
};
type TypedToolResult<TOOLS extends ToolSet> = StaticToolResult<TOOLS> | DynamicToolResult;
type ContentPart<TOOLS extends ToolSet> =
  | { type: 'text'; text: string; providerMetadata?: ProviderMetadata }
  | { type: 'reasoning'; text: string; providerMetadata?: ProviderMetadata }
  | ({ type: 'source' } & Source)
  | { type: 'file'; file: GeneratedFile; providerMetadata?: ProviderMetadata }
  | ({ type: 'tool-call' } & TypedToolCall<TOOLS> & { providerMetadata?: ProviderMetadata })
  | ({ type: 'tool-result' } & TypedToolResult<TOOLS> & { providerMetadata?: ProviderMetadata })
  | ({ type: 'tool-error' } & TypedToolError<TOOLS> & { providerMetadata?: ProviderMetadata });
/**
 * A message that was generated during the generation process.
 * It can be either an assistant message or a tool message.
 */
type ResponseMessage = AssistantModelMessage | ToolModelMessage;
/** The result of a single step in the generation process. */
type StepResult<TOOLS extends ToolSet> = {
  /** The content that was generated in the last step. */
  readonly content: Array<ContentPart<TOOLS>>;
  /** The generated text. */
  readonly text: string;
  /** The reasoning that was generated during the generation. */
  readonly reasoning: Array<ReasoningPart>;
  /** The reasoning text that was generated during the generation. */
  readonly reasoningText: string | undefined;
  /** The files that were generated during the generation. */
  readonly files: Array<GeneratedFile>;
  /** The sources that were used to generate the text. */
  readonly sources: Array<Source>;
  /** The tool calls that were made during the generation. */
  readonly toolCalls: Array<TypedToolCall<TOOLS>>;
  /** The static tool calls that were made in the last step. */
  readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
  /** The dynamic tool calls that were made in the last step. */
  readonly dynamicToolCalls: Array<DynamicToolCall>;
  /** The results of the tool calls. */
  readonly toolResults: Array<TypedToolResult<TOOLS>>;
  /** The static tool results that were made in the last step. */
  readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
  /** The dynamic tool results that were made in the last step. */
  readonly dynamicToolResults: Array<DynamicToolResult>;
  /** The reason why the generation finished. */
  readonly finishReason: FinishReason;
  /** The token usage of the generated text. */
  readonly usage: LanguageModelUsage;
  /** Warnings from the model provider (e.g. unsupported settings). */
  readonly warnings: CallWarning[] | undefined;
  /** Additional request information. */
  readonly request: LanguageModelRequestMetadata;
  /** Additional response information. */
  readonly response: LanguageModelResponseMetadata & {
    /**
     * The response messages that were generated during the call.
     * Response messages can be either assistant messages or tool messages.
     * They contain a generated id.
     */
    readonly messages: Array<ResponseMessage>;
    /** Response body (available only for providers that use HTTP requests). */
    body?: unknown;
  };
  /**
   * Additional provider-specific metadata. They are passed through from the
   * provider to the AI SDK and enable provider-specific results that can be
   * fully encapsulated in the provider.
   */
  readonly providerMetadata: ProviderMetadata | undefined;
};
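// --- Usage sketch (illustrative, not part of the declarations) ---
// Iterating the `content` parts of each `StepResult`, assuming `result` comes
// from a `generateText` call with the static `weather` tool sketched above.
// Checking `dynamic` and `toolName` narrows a `TypedToolCall` to its static,
// fully typed variant.
for (const step of result.steps) {
  for (const part of step.content) {
    switch (part.type) {
      case 'text':
        console.log('text:', part.text);
        break;
      case 'tool-call':
        if (!part.dynamic && part.toolName === 'weather') {
          // `input` is narrowed to the tool's input type, e.g. { city: string }
          console.log('weather requested for', part.input.city);
        }
        break;
      case 'tool-result':
        console.log('tool result:', part.output);
        break;
    }
  }
}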
/**
 * The result of a `generateText` call.
 * It contains the generated text, the tool calls that were made during the
 * generation, and the results of the tool calls.
 */
interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
  /** The content that was generated in the last step. */
  readonly content: Array<ContentPart<TOOLS>>;
  /** The text that was generated in the last step. */
  readonly text: string;
  /** The full reasoning that the model has generated in the last step. */
  readonly reasoning: Array<ReasoningPart>;
  /**
   * The reasoning text that the model has generated in the last step.
   * Can be undefined if the model has only generated text.
   */
  readonly reasoningText: string | undefined;
  /** The files that were generated in the last step. Empty array if no files were generated. */
  readonly files: Array<GeneratedFile>;
  /** Sources that have been used as references in the last step. */
  readonly sources: Array<Source>;
  /** The tool calls that were made in the last step. */
  readonly toolCalls: Array<TypedToolCall<TOOLS>>;
  /** The static tool calls that were made in the last step. */
  readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
  /** The dynamic tool calls that were made in the last step. */
  readonly dynamicToolCalls: Array<DynamicToolCall>;
  /** The results of the tool calls from the last step. */
  readonly toolResults: Array<TypedToolResult<TOOLS>>;
  /** The static tool results that were made in the last step. */
  readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
  /** The dynamic tool results that were made in the last step. */
  readonly dynamicToolResults: Array<DynamicToolResult>;
  /** The reason why the generation finished. */
  readonly finishReason: FinishReason;
  /** The token usage of the last step. */
  readonly usage: LanguageModelUsage;
  /**
   * The total token usage of all steps.
   * When there are multiple steps, the usage is the sum of all step usages.
   */
  readonly totalUsage: LanguageModelUsage;
  /** Warnings from the model provider (e.g. unsupported settings). */
  readonly warnings: CallWarning[] | undefined;
  /** Additional request information. */
  readonly request: LanguageModelRequestMetadata;
  /** Additional response information. */
  readonly response: LanguageModelResponseMetadata & {
    /**
     * The response messages that were generated during the call. It consists
     * of an assistant message, potentially containing tool calls.
     *
     * When there are tool results, there is an additional tool message with
     * the tool results that are available.
     * If there are tools that do not have execute functions, they are not
     * included in the tool results and need to be added separately.
     */
    messages: Array<ResponseMessage>;
    /** Response body (available only for providers that use HTTP requests). */
    body?: unknown;
  };
  /**
   * Additional provider-specific metadata. They are passed through from the
   * provider to the AI SDK and enable provider-specific results that can be
   * fully encapsulated in the provider.
   */
  readonly providerMetadata: ProviderMetadata | undefined;
  /**
   * Details for all steps.
   * You can use this to get information about intermediate steps,
   * such as the tool calls or the response headers.
   */
  readonly steps: Array<StepResult<TOOLS>>;
  /** The generated structured output. It uses the `experimental_output` specification. */
  readonly experimental_output: OUTPUT;
}
/**
 * Create a type from an object with all keys and nested keys set to optional.
 * The helper supports normal objects and Zod schemas (which are resolved
 * automatically). It always recurses into arrays.
 *
 * Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
 */
type DeepPartial<T> = T extends z3.ZodTypeAny ? DeepPartialInternal<z3.infer<T>> : T extends z4.core.$ZodType ? DeepPartialInternal<z4.infer<T>> : DeepPartialInternal<T>;
type DeepPartialInternal<T> =
  T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown)
    ? T
    : T extends Map<infer KeyType, infer ValueType>
      ? PartialMap<KeyType, ValueType>
      : T extends Set<infer ItemType>
        ? PartialSet<ItemType>
        : T extends ReadonlyMap<infer KeyType, infer ValueType>
          ? PartialReadonlyMap<KeyType, ValueType>
          : T extends ReadonlySet<infer ItemType>
            ? PartialReadonlySet<ItemType>
            : T extends object
              ? T extends ReadonlyArray<infer ItemType>
                ? ItemType[] extends T
                  ? readonly ItemType[] extends T
                    ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>>
                    : Array<DeepPartialInternal<ItemType | undefined>>
                  : PartialObject<T>
                : PartialObject<T>
              : unknown;
type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
type PartialObject<ObjectType extends object> = {
  [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
};
interface Output<OUTPUT, PARTIAL> {
  readonly type: 'object' | 'text';
  responseFormat: LanguageModelV2CallOptions['responseFormat'];
  parsePartial(options: {
    text: string;
  }): Promise<{
    partial: PARTIAL;
  } | undefined>;
  parseOutput(options: {
    text: string;
  }, context: {
    response: LanguageModelResponseMetadata;
    usage: LanguageModelUsage;
    finishReason: FinishReason;
  }): Promise<OUTPUT>;
}
declare const text: () => Output<string, string>;
declare const object: <OUTPUT>({ schema: inputSchema, }: {
  schema: z4.core.$ZodType<OUTPUT, any> | z3.Schema<OUTPUT, z3.ZodTypeDef, any> | Schema<OUTPUT>;
}) => Output<OUTPUT, DeepPartial<OUTPUT>>;
type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
declare const output_object: typeof object;
declare const output_text: typeof text;
declare namespace output {
  export { output_Output as Output, output_object as object, output_text as text };
}
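// --- Usage sketch (illustrative, not part of the declarations) ---
// Structured output via the `output` namespace (assumed to be exported from
// 'ai' as `Output`, per the SDK docs): `Output.object({ schema })` parses the
// final text into the schema type, and partial parses are `DeepPartial` of it.
// Model id and prompt are placeholder assumptions.
import { generateText, Output } from 'ai';
import { z } from 'zod';

const { experimental_output } = await generateText({
  model: 'openai/gpt-4o', // placeholder model id
  prompt: 'Extract the person: "Ada Lovelace, born 1815".',
  experimental_output: Output.object({
    schema: z.object({ name: z.string(), birthYear: z.number() }),
  }),
});
// Typed as { name: string; birthYear: number }:
console.log(experimental_output.name, experimental_output.birthYear);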
/**
 * Function that you can use to provide different settings for a step.
 *
 * @param options - The options for the step.
 * @param options.steps - The steps that have been executed so far.
 * @param options.stepNumber - The number of the step that is being executed.
 * @param options.model - The model that is being used.
 *
 * @returns An object that contains the settings for the step.
 * If you return undefined (or for undefined settings), the settings from the
 * outer level will be used.
 */
type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
  steps: Array<StepResult<NoInfer<TOOLS>>>;
  stepNumber: number;
  model: LanguageModel;
  messages: Array<ModelMessage>;
}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
  model?: LanguageModel;
  toolChoice?: ToolChoice<NoInfer<TOOLS>>;
  activeTools?: Array<keyof NoInfer<TOOLS>>;
  system?: string;
  messages?: Array<ModelMessage>;
} | undefined;
type StopCondition<TOOLS extends ToolSet> = (options: {
  steps: Array<StepResult<TOOLS>>;
}) => PromiseLike<boolean> | boolean;
declare function stepCountIs(stepCount: number): StopCondition<any>;
declare function hasToolCall(toolName: string): StopCondition<any>;
declare const symbol$f: unique symbol;
declare class InvalidToolInputError extends AISDKError {
  private readonly [symbol$f];
  readonly toolName: string;
  readonly toolInput: string;
  constructor({ toolInput, toolName, cause, message, }: {
    message?: string;
    toolInput: string;
    toolName: string;
    cause: unknown;
  });
  static isInstance(error: unknown): error is InvalidToolInputError;
}
declare const symbol$e: unique symbol;
declare class NoSuchToolError extends AISDKError {
  private readonly [symbol$e];
  readonly toolName: string;
  readonly availableTools: string[] | undefined;
  constructor({ toolName, availableTools, message, }: {
    toolName: string;
    availableTools?: string[] | undefined;
    message?: string;
  });
  static isInstance(error: unknown): error is NoSuchToolError;
}
/** @deprecated Use `SystemModelMessage` instead. */
type CoreSystemMessage = SystemModelMessage;
declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
/** @deprecated Use `systemModelMessageSchema` instead. */
declare const coreSystemMessageSchema: z.ZodType<SystemModelMessage, unknown, z.core.$ZodTypeInternals<SystemModelMessage, unknown>>;
/** @deprecated Use `UserModelMessage` instead. */
type CoreUserMessage = UserModelMessage;
declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
/** @deprecated Use `userModelMessageSchema` instead. */
declare const coreUserMessageSchema: z.ZodType<UserModelMessage, unknown, z.core.$ZodTypeInternals<UserModelMessage, unknown>>;
/** @deprecated Use `AssistantModelMessage` instead. */
type CoreAssistantMessage = AssistantModelMessage;
declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
/** @deprecated Use `assistantModelMessageSchema` instead. */
declare const coreAssistantMessageSchema: z.ZodType<AssistantModelMessage, unknown, z.core.$ZodTypeInternals<AssistantModelMessage, unknown>>;
/** @deprecated Use `ToolModelMessage` instead. */
type CoreToolMessage = ToolModelMessage;
declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
/** @deprecated Use `toolModelMessageSchema` instead. */
declare const coreToolMessageSchema: z.ZodType<ToolModelMessage, unknown, z.core.$ZodTypeInternals<ToolModelMessage, unknown>>;
/** @deprecated Use `ModelMessage` instead. */
type CoreMessage = ModelMessage;
declare const modelMessageSchema: z.ZodType<ModelMessage>;
/** @deprecated Use `modelMessageSchema` instead. */
declare const coreMessageSchema: z.ZodType<CoreMessage>;
/**
 * A function that attempts to repair a tool call that failed to parse.
 *
 * It receives the error and the context as arguments and returns the repaired
 * tool call JSON as text.
 *
 * @param options.system - The system prompt.
 * @param options.messages - The messages in the current generation step.
 * @param options.toolCall - The tool call that failed to parse.
 * @param options.tools - The tools that are available.
 * @param options.inputSchema - A function that returns the JSON Schema for a tool.
 * @param options.error - The error that occurred while parsing the tool call.
 */
type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
  system: string | undefined;
  messages: ModelMessage[];
  toolCall: LanguageModelV2ToolCall;
  tools: TOOLS;
  inputSchema: (options: { toolName: string }) => JSONSchema7;
  error: NoSuchToolError | InvalidToolInputError;
}) => Promise<LanguageModelV2ToolCall | null>;
/**
 * Callback that is set using the `onStepFinish` option.
 * @param stepResult - The result of the step.
 */
type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
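// --- Usage sketch (illustrative, not part of the declarations) ---
// Multi-step generation with stop conditions: with `stopWhen`, `generateText`
// (declared directly below) keeps issuing steps after tool results until any
// condition is met. The `tools` object is the weather sketch from above;
// model id is a placeholder assumption.
import { generateText, stepCountIs, hasToolCall } from 'ai';

const run = await generateText({
  model: 'openai/gpt-4o', // placeholder model id
  tools,
  prompt: 'Look up the weather in Berlin, then answer in one sentence.',
  // Stop after at most 5 steps, or as soon as the weather tool has been called.
  stopWhen: [stepCountIs(5), hasToolCall('weather')],
});
console.log(run.steps.length, run.text);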
/**
 * Generate a text and call tools for a given prompt using a language model.
 *
 * This function does not stream the output. If you want to stream the output,
 * use `streamText` instead.
 *
 * @param model - The language model to use.
 * @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
 * @param toolChoice - The tool choice strategy. Default: 'auto'.
 * @param system - A system message that will be part of the prompt.
 * @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
 * @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
 * @param maxOutputTokens - Maximum number of tokens to generate.
 * @param temperature - Temperature setting. The value is passed through to the provider. The range depends on the provider and model. It is recommended to set either `temperature` or `topP`, but not both.
 * @param topP - Nucleus sampling. The value is passed through to the provider. The range depends on the provider and model. It is recommended to set either `temperature` or `topP`, but not both.
 * @param topK - Only sample from the top K options for each subsequent token. Used to remove "long tail" low-probability responses. Recommended for advanced use cases only. You usually only need to use `temperature`.
 * @param presencePenalty - Presence penalty setting. It affects how likely the model is to repeat information that is already in the prompt. The value is passed through to the provider. The range depends on the provider and model.
 * @param frequencyPenalty - Frequency penalty setting. It affects how likely the model is to repeatedly use the same words or phrases. The value is passed through to the provider. The range depends on the provider and model.
 * @param stopSequences - Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated.
 * @param seed - The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results.
 * @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 * @param abortSignal - An optional abort signal that can be used to cancel the call.
 * @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 * @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
 *
 * @returns A result object that contains the generated text, the results of the tool calls, and additional information.
 */
declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /** The language model to use. */
  model: LanguageModel;
  /** The tools that the model can call. The model needs to support calling tools. */
  tools?: TOOLS;
  /** The tool choice strategy. Default: 'auto'. */
  toolChoice?: ToolChoice<NoInfer<TOOLS>>;
  /**
   * Condition for stopping the generation when there are tool results in the
   * last step. When the condition is an array, any of the conditions can be
   * met to stop the generation.
   * @default stepCountIs(1)
   */
  stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
  /** Optional telemetry configuration (experimental). */
  experimental_telemetry?: TelemetrySettings;
  /**
   * Additional provider-specific options. They are passed through to the
   * provider from the AI SDK and enable provider-specific functionality that
   * can be fully encapsulated in the provider.
   */
  providerOptions?: ProviderOptions;
  /** @deprecated Use `activeTools` instead. */
  experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
   * Limits the tools that are available for the model to call without
   * changing the tool call and result types in the result.
   */
  activeTools?: Array<keyof NoInfer<TOOLS>>;
  /** Optional specification for parsing structured outputs from the LLM response. */
  experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
  /**
   * Custom download function to use for URLs.
   * By default, files are downloaded if the model does not support the URL
   * for the given media type.
   */
  experimental_download?: DownloadFunction | undefined;
  /** @deprecated Use `prepareStep` instead. */
  experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
  /** Optional function that you can use to provide different settings for a step. */
  prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
  /** A function that attempts to repair a tool call that failed to parse. */
  experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
  /** Callback that is called when each step (LLM call) is finished, including intermediate steps. */
  onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
  /**
   * Context that is passed into tool execution.
   *
   * Experimental (can break in patch releases).
   *
   * @default undefined
   */
  experimental_context?: unknown;
  /** Internal. For test use only. May change without notice. */
  _internal?: {
    generateId?: IdGenerator;
    currentDate?: () => Date;
  };
}): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
/**
 * Detects the first chunk in a buffer.
 *
 * @param buffer - The buffer to detect the first chunk in.
 *
 * @returns The first detected chunk, or `undefined` if no chunk was detected.
 */
type ChunkDetector = (buffer: string) => string | undefined | null;
/**
 * Smooths text streaming output.
 *
 * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
 * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
 *
 * @returns A transform stream that smooths text streaming output.
 */
declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
  delayInMs?: number | null;
  chunking?: 'word' | 'line' | RegExp | ChunkDetector;
  /** Internal. For test use only. May change without notice. */
  _internal?: {
    delay?: (delayInMs: number | null) => Promise<void>;
  };
}): (options: {
  tools: TOOLS;
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
/**
 * A transformation that is applied to the stream.
 *
 * @param stopStream - A function that stops the source stream.
 * @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
 */
type StreamTextTransform<TOOLS extends ToolSet> = (options: {
  tools: TOOLS;
  stopStream: () => void;
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
/**
 * Callback that is set using the `onError` option.
 * @param event - The event that is passed to the callback.
 */
type StreamTextOnErrorCallback = (event: {
  error: unknown;
}) => PromiseLike<void> | void;
/**
 * Callback that is set using the `onStepFinish` option.
 * @param stepResult - The result of the step.
 */
type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => PromiseLike<void> | void;
/**
 * Callback that is set using the `onChunk` option.
 * @param event - The event that is passed to the callback.
 */
type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
  chunk: Extract<TextStreamPart<TOOLS>, {
    type: 'text-delta' | 'reasoning-delta' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
  }>;
}) => PromiseLike<void> | void;
/**
 * Callback that is set using the `onFinish` option.
 * @param event - The event that is passed to the callback.
 */
type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
  /** Details for all steps. */
  readonly steps: StepResult<TOOLS>[];
  /** Total usage for all steps. This is the sum of the usage of all steps. */
  readonly totalUsage: LanguageModelUsage;
}) => PromiseLike<void> | void;
/**
 * Callback that is set using the `onAbort` option.
 * @param event - The event that is passed to the callback.
 */
type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
  /** Details for all previously finished steps. */
  readonly steps: StepResult<TOOLS>[];
}) => PromiseLike<void> | void;
/**
 * Generate a text and call tools for a given prompt using a language model.
 *
 * This function streams the output. If you do not want to stream the output,
 * use `generateText` instead.
 *
 * @param model - The language model to use.
 * @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
 * @param system - A system message that will be part of the prompt.
 * @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
 * @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
 * @param maxOutputTokens - Maximum number of tokens to generate.
 * @param temperature - Temperature setting. The value is passed through to the provider. The range depends on the provider and model. It is recommended to set either `temperature` or `topP`, but not both.
 * @param topP - Nucleus sampling. The value is passed through to the provider. The range depends on the provider and model. It is recommended to set either `temperature` or `topP`, but not both.
 * @param topK - Only sample from the top K options for each subsequent token. Used to remove "long tail" low-probability responses. Recommended for advanced use cases only. You usually only need to use `temperature`.
 * @param presencePenalty - Presence penalty setting. It affects how likely the model is to repeat information that is already in the prompt. The value is passed through to the provider. The range depends on the provider and model.
 * @param frequencyPenalty - Frequency penalty setting. It affects how likely the model is to repeatedly use the same words or phrases. The value is passed through to the provider. The range depends on the provider and model.
 * @param stopSequences - Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated.
 * @param seed - The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results.
 * @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 * @param abortSignal - An optional abort signal that can be used to cancel the call.
 * @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 * @param stopWhen - Condition for stopping the generation when there are tool results in the last step, e.g. when you use sequential LLM calls (steps) with tool calls.
 * @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
 * @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
 * @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
 * @param onFinish - Callback that is called when the LLM response and all request tool executions (for tools that have an `execute` function) are finished.
 *
 * @return A result object for accessing different stream types and additional information.
 */
declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /** The language model to use. */
  model: LanguageModel;
  /** The tools that the model can call. The model needs to support calling tools. */
  tools?: TOOLS;
  /** The tool choice strategy. Default: 'auto'. */
  toolChoice?: ToolChoice<TOOLS>;
  /**
   * Condition for stopping the generation when there are tool results in the
   * last step. When the condition is an array, any of the conditions can be
   * met to stop the generation.
   * @default stepCountIs(1)
   */
  stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
  /** Optional telemetry configuration (experimental). */
  experimental_telemetry?: TelemetrySettings;
  /**
   * Additional provider-specific options. They are passed through to the
   * provider from the AI SDK and enable provider-specific functionality that
   * can be fully encapsulated in the provider.
   */
  providerOptions?: ProviderOptions;
  /** @deprecated Use `activeTools` instead. */
  experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
   * Limits the tools that are available for the model to call without
   * changing the tool call and result types in the result.
   */
  activeTools?: Array<keyof NoInfer<TOOLS>>;
  /** Optional specification for parsing structured outputs from the LLM response. */
  experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
  /**
   * Optional function that you can use to provide different settings for a step.
   *
   * @param options - The options for the step.
   * @param options.steps - The steps that have been executed so far.
   * @param options.stepNumber - The number of the step that is being executed.
   * @param options.model - The model that is being used.
   *
   * @returns An object that contains the settings for the step.
   * If you return undefined (or for undefined settings), the settings from
   * the outer level will be used.
   */
  prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
  /** A function that attempts to repair a tool call that failed to parse. */
  experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
  /**
   * Optional stream transformations. They are applied in the order they are
   * provided. The stream transformations must maintain the stream structure
   * for streamText to work correctly.
   */
  experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
  /**
   * Custom download function to use for URLs.
   * By default, files are downloaded if the model does not support the URL
   * for the given media type.
   */
  experimental_download?: DownloadFunction | undefined;
  /**
   * Whether to include raw chunks from the provider in the stream. When
   * enabled, you will receive raw chunks with type 'raw' that contain the
   * unprocessed data from the provider. This allows access to cutting-edge
   * provider features not yet wrapped by the AI SDK. Defaults to false.
   */
  includeRawChunks?: boolean;
  /**
   * Callback that is called for each chunk of the stream.
   * The stream processing will pause until the callback promise is resolved.
   */
  onChunk?: StreamTextOnChunkCallback<TOOLS>;
  /**
   * Callback that is invoked when an error occurs during streaming.
   * You can use it to log errors.
   * The stream processing will pause until the callback promise is resolved.
   */
  onError?: StreamTextOnErrorCallback;
  /**
   * Callback that is called when the LLM response and all request tool
   * executions (for tools that have an `execute` function) are finished.
   *
   * The usage is the combined usage of all steps.
   */
  onFinish?: StreamTextOnFinishCallback<TOOLS>;
  onAbort?: StreamTextOnAbortCallback<TOOLS>;
  /** Callback that is called when each step (LLM call) is finished, including intermediate steps. */
  onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
  /**
   * Context that is passed into tool execution.
   *
   * Experimental (can break in patch releases).
   *
   * @default undefined
   */
  experimental_context?: unknown;
  /** Internal. For test use only. May change without notice. */
  _internal?: {
    now?: () => number;
    generateId?: IdGenerator;
    currentDate?: () => Date;
  };
}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
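// --- Usage sketch (illustrative, not part of the declarations) ---
// A `streamText` call wiring up the lifecycle callbacks declared above.
// The `tools` object is the weather sketch from earlier; model id and logging
// are placeholder assumptions. `text` on the result is assumed to resolve once
// the stream has finished, as documented in the SDK.
import { streamText, stepCountIs } from 'ai';

const result = streamText({
  model: 'openai/gpt-4o', // placeholder model id
  tools,
  stopWhen: stepCountIs(3),
  prompt: 'Check the weather in Berlin and summarize it.',
  onChunk: ({ chunk }) => {
    if (chunk.type === 'tool-call') console.log('calling tool:', chunk.toolName);
  },
  onError: ({ error }) => console.error('stream error:', error),
  onFinish: ({ totalUsage }) => console.log('total usage:', totalUsage),
});
console.log(await result.text);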
/** The data types that can be used in the UI message for the UI message data parts. */
type UIDataTypes = Record<string, unknown>;
type UITool = {
  input: unknown;
  output: unknown | undefined;
};
/** Infer the input and output types of a tool so it can be used as a UI tool. */
type InferUITool<TOOL extends Tool> = {
  input: InferToolInput<TOOL>;
  output: InferToolOutput<TOOL>;
};
/** Infer the input and output types of a tool set so it can be used as a UI tool set. */
type InferUITools<TOOLS extends ToolSet> = {
  [NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
};
type UITools = Record<string, UITool>;
/**
 * AI SDK UI Messages. They are used in the client and to communicate between
 * the frontend and the API routes.
 */
interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
  /** A unique identifier for the message. */
  id: string;
  /** The role of the message. */
  role: 'system' | 'user' | 'assistant';
  /** The metadata of the message. */
  metadata?: METADATA;
  /**
   * The parts of the message. Use this for rendering the message in the UI.
   *
   * System messages should be avoided (set the system prompt on the server
   * instead). They can have text parts.
   *
   * User messages can have text parts and file parts.
   *
   * As