ai
Version:
AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript
1,431 lines (1,384 loc) • 228 kB
text/typescript
import { GatewayModelId } from '@ai-sdk/gateway';
export { GatewayModelId, createGateway, gateway } from '@ai-sdk/gateway';
import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, SystemModelMessage, ModelMessage, AssistantModelMessage, ToolModelMessage, ReasoningPart, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
export { AssistantContent, AssistantModelMessage, DataContent, DownloadError, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
import * as _ai_sdk_provider from '@ai-sdk/provider';
import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, ImageModelV3Middleware, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, AISDKError, LanguageModelV3ToolCall, JSONSchema7, LanguageModelV3CallOptions, JSONParseError, TypeValidationError, Experimental_VideoModelV3, EmbeddingModelV3CallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
import { AttributeValue, Tracer } from '@opentelemetry/api';
import { ServerResponse } from 'node:http';
import { ServerResponse as ServerResponse$1 } from 'http';
import { z } from 'zod/v4';
/**
* Embedding model that is used by the AI SDK.
*/
type EmbeddingModel = string | EmbeddingModelV3 | EmbeddingModelV2<string>;
/**
* Embedding that was produced by an embedding model.
* Alias for the V3 embedding type.
*/
type Embedding = EmbeddingModelV3Embedding;
/**
* Middleware for embedding models. Alias for the V3 embedding model middleware.
*/
type EmbeddingModelMiddleware = EmbeddingModelV3Middleware;
/**
* Image model that is used by the AI SDK.
*/
type ImageModel = string | ImageModelV3 | ImageModelV2;
/**
* Metadata from the model provider for this call.
*/
type ImageModelProviderMetadata = ImageModelV3ProviderMetadata | ImageModelV2ProviderMetadata;
/**
* Middleware for image models. Alias for the V3 image model middleware.
*/
type ImageModelMiddleware = ImageModelV3Middleware;
/**
* Response metadata for an image model call.
*/
type ImageModelResponseMetadata = {
/**
* Timestamp for the start of the generated response.
*/
timestamp: Date;
/**
* The ID of the response model that was used to generate the response.
*/
modelId: string;
/**
* Response headers.
*/
headers?: Record<string, string>;
};
/**
* JSON value. Alias for the provider package's JSON value type.
*/
type JSONValue = JSONValue$1;
declare global {
/**
* Global interface that can be augmented by third-party packages to register custom model IDs.
*
* You can register model IDs in two ways:
*
* 1. Register based on Model IDs from a provider package:
* @example
* ```typescript
* import { openai } from '@ai-sdk/openai';
* type OpenAIResponsesModelId = Parameters<typeof openai>[0];
*
* declare global {
* interface RegisteredProviderModels {
* openai: OpenAIResponsesModelId;
* }
* }
* ```
*
* 2. Register individual model IDs directly as keys:
* @example
* ```typescript
* declare global {
* interface RegisteredProviderModels {
* 'my-provider:my-model': any;
* 'my-provider:another-model': any;
* }
* }
* ```
*/
interface RegisteredProviderModels {
}
}
/**
* Global provider model ID type that defaults to GatewayModelId but can be augmented
* by third-party packages via declaration merging.
*
* The tuple wrapping (`[keyof RegisteredProviderModels] extends [never]`) prevents
* the conditional type from distributing over the key union, so it checks whether
* the interface is empty: with no registered models, fall back to GatewayModelId;
* otherwise use the registered keys and their registered value types.
*/
type GlobalProviderModelId = [keyof RegisteredProviderModels] extends [
never
] ? GatewayModelId : keyof RegisteredProviderModels | RegisteredProviderModels[keyof RegisteredProviderModels];
/**
* Language model that is used by the AI SDK.
*/
type LanguageModel = GlobalProviderModelId | LanguageModelV3 | LanguageModelV2;
/**
* Reason why a language model finished generating a response.
*
* Can be one of the following:
* - `stop`: model generated stop sequence
* - `length`: model generated maximum number of tokens
* - `content-filter`: content filter violation stopped the model
* - `tool-calls`: model triggered tool calls
* - `error`: model stopped because of an error
* - `other`: model stopped for other reasons
*/
type FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
/**
* Warning from the model provider for this call. The call will proceed, but e.g.
* some settings might not be supported, which can lead to suboptimal results.
*/
type CallWarning = SharedV3Warning;
/**
* A source that has been used as input to generate the response.
*/
type Source = LanguageModelV3Source;
/**
* Tool choice for the generation. It supports the following settings:
*
* - `auto` (default): the model can choose whether and which tools to call.
* - `required`: the model must call a tool. It can choose which tool to call.
* - `none`: the model must not call tools.
* - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
*/
type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
type: 'tool';
toolName: Extract<keyof TOOLS, string>;
};
/**
* Middleware for language models. Alias for the V3 language model middleware.
*/
type LanguageModelMiddleware = LanguageModelV3Middleware;
/**
* Metadata about the request that was sent to the provider API.
*/
type LanguageModelRequestMetadata = {
/**
* Request HTTP body that was sent to the provider API.
*/
body?: unknown;
};
/**
* Metadata about the response that was received from the provider API.
*/
type LanguageModelResponseMetadata = {
/**
* ID for the generated response.
*/
id: string;
/**
* Timestamp for the start of the generated response.
*/
timestamp: Date;
/**
* The ID of the response model that was used to generate the response.
*/
modelId: string;
/**
* Response headers (available only for providers that use HTTP requests).
*/
headers?: Record<string, string>;
};
/**
* Reranking model that is used by the AI SDK.
*/
type RerankingModel = RerankingModelV3;
/**
* Provider for language, text embedding, and image models.
*/
type Provider = {
/**
* Returns the language model with the given id.
* The model id is then passed to the provider function to get the model.
*
* @param {string} modelId - The id of the model to return.
*
* @returns {LanguageModel} The language model associated with the id
*
* @throws {NoSuchModelError} If no such model exists.
*/
languageModel(modelId: string): LanguageModel;
/**
* Returns the text embedding model with the given id.
* The model id is then passed to the provider function to get the model.
*
* @param {string} modelId - The id of the model to return.
*
* @returns {EmbeddingModel} The embedding model associated with the id
*
* @throws {NoSuchModelError} If no such model exists.
*/
embeddingModel(modelId: string): EmbeddingModel;
/**
* Returns the image model with the given id.
* The model id is then passed to the provider function to get the model.
*
* @param {string} modelId - The id of the model to return.
*
* @returns {ImageModel} The image model associated with the id
*
* NOTE(review): unlike the sibling accessors, no `@throws` is documented here —
* confirm whether implementations throw NoSuchModelError and document it if so.
*/
imageModel(modelId: string): ImageModel;
/**
* Returns the reranking model with the given id.
* The model id is then passed to the provider function to get the model.
*
* @param {string} modelId - The id of the model to return.
*
* @returns {RerankingModel} The reranking model associated with the id
*
* @throws {NoSuchModelError} If no such model exists.
*/
rerankingModel(modelId: string): RerankingModel;
};
/**
* Additional provider-specific metadata that is returned from the provider.
*
* This is needed to enable provider-specific functionality that can be
* fully encapsulated in the provider.
*/
type ProviderMetadata = SharedV3ProviderMetadata;
/**
* Speech model that is used by the AI SDK.
*/
type SpeechModel = string | SpeechModelV3 | SpeechModelV2;
/**
* Response metadata for a speech model call.
*/
type SpeechModelResponseMetadata = {
/**
* Timestamp for the start of the generated response.
*/
timestamp: Date;
/**
* The ID of the response model that was used to generate the response.
*/
modelId: string;
/**
* Response headers.
*/
headers?: Record<string, string>;
/**
* Response body.
*/
body?: unknown;
};
/**
* Transcription model that is used by the AI SDK.
*/
type TranscriptionModel = string | TranscriptionModelV3 | TranscriptionModelV2;
/**
* Response metadata for a transcription model call.
*/
type TranscriptionModelResponseMetadata = {
/**
* Timestamp for the start of the generated response.
*/
timestamp: Date;
/**
* The ID of the response model that was used to generate the response.
*/
modelId: string;
/**
* Response headers.
*/
headers?: Record<string, string>;
};
/**
* Represents the number of tokens used in a prompt and completion.
*/
type LanguageModelUsage = {
/**
* The total number of input (prompt) tokens used.
*/
inputTokens: number | undefined;
/**
* Detailed information about the input tokens.
*/
inputTokenDetails: {
/**
* The number of non-cached input (prompt) tokens used.
*/
noCacheTokens: number | undefined;
/**
* The number of cached input (prompt) tokens read.
*/
cacheReadTokens: number | undefined;
/**
* The number of cached input (prompt) tokens written.
*/
cacheWriteTokens: number | undefined;
};
/**
* The number of total output (completion) tokens used.
*/
outputTokens: number | undefined;
/**
* Detailed information about the output tokens.
*/
outputTokenDetails: {
/**
* The number of text tokens used.
*/
textTokens: number | undefined;
/**
* The number of reasoning tokens used.
*/
reasoningTokens: number | undefined;
};
/**
* The total number of tokens used.
*/
totalTokens: number | undefined;
/**
* @deprecated Use outputTokenDetails.reasoningTokens instead.
*/
reasoningTokens?: number | undefined;
/**
* @deprecated Use inputTokenDetails.cacheReadTokens instead.
*/
cachedInputTokens?: number | undefined;
/**
* Raw usage information from the provider.
*
* This is the usage information in the shape that the provider returns.
* It can include additional information that is not part of the standard usage information.
*/
raw?: JSONObject;
};
/**
* Represents the number of tokens used in an embedding.
*/
type EmbeddingModelUsage = {
/**
* The number of tokens used in the embedding.
*/
tokens: number;
};
/**
* Usage information for an image model call.
*/
type ImageModelUsage = ImageModelV3Usage;
/**
* Warning from the model provider for this call. The call will proceed, but e.g.
* some settings might not be supported, which can lead to suboptimal results.
*
* Same underlying type as {@link CallWarning} (both alias SharedV3Warning).
*/
type Warning = SharedV3Warning;
/**
* A generated file.
*/
interface GeneratedFile {
/**
* File as a base64 encoded string.
*/
readonly base64: string;
/**
* File as a Uint8Array.
*/
readonly uint8Array: Uint8Array;
/**
* The IANA media type of the file.
*
* @see https://www.iana.org/assignments/media-types/media-types.xhtml
*/
readonly mediaType: string;
}
/**
* Default implementation of {@link GeneratedFile}. Accepts the file data either
* as a base64 string or as raw bytes and exposes both representations via getters.
*/
declare class DefaultGeneratedFile implements GeneratedFile {
// Internal storage for the two representations; conversion strategy
// (eager vs. lazy) is not visible from this declaration.
private base64Data;
private uint8ArrayData;
readonly mediaType: string;
/**
* @param data - File data, either as a base64 encoded string or as raw bytes.
* @param mediaType - The IANA media type of the file.
*/
constructor({ data, mediaType, }: {
data: string | Uint8Array;
mediaType: string;
});
/** File as a base64 encoded string. */
get base64(): string;
/** File as a Uint8Array. */
get uint8Array(): Uint8Array<ArrayBufferLike>;
}
/**
* Create a union of the given object's values, and optionally specify which keys to get the values from.
*
* Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
*
* @example
* ```
* // data.json
* {
* 'foo': 1,
* 'bar': 2,
* 'biz': 3
* }
*
* // main.ts
* import type {ValueOf} from 'type-fest';
* import data = require('./data.json');
*
* export function getData(name: string): ValueOf<typeof data> {
* return data[name];
* }
*
* export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
* return data[name];
* }
*
* // file.ts
* import {getData, onlyBar} from './main';
*
* getData('foo');
* //=> 1
*
* onlyBar('foo');
* //=> TypeError ...
*
* onlyBar('bar');
* //=> 2
* ```
* @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
*/
type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
/**
* A set of tools, keyed by tool name, that can be made available to a model call.
*/
type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta' | 'needsApproval'>>;
/**
* Properties shared by all tool call variants.
*/
type BaseToolCall = {
type: 'tool-call';
toolCallId: string;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
};
/**
* Tool call for a tool that is statically declared in TOOLS;
* `input` is typed from the tool's parameter definition.
*/
type StaticToolCall<TOOLS extends ToolSet> = ValueOf<{
[NAME in keyof TOOLS]: BaseToolCall & {
toolName: NAME & string;
input: TOOLS[NAME] extends Tool<infer PARAMETERS> ? PARAMETERS : never;
dynamic?: false | undefined;
invalid?: false | undefined;
error?: never;
title?: string;
};
}>;
/**
* Tool call that is not statically declared in TOOLS (`dynamic: true`);
* `input` is untyped.
*/
type DynamicToolCall = BaseToolCall & {
toolName: string;
input: unknown;
dynamic: true;
title?: string;
/**
* True if this is caused by an unparsable tool call or
* a tool that does not exist.
*/
invalid?: boolean;
/**
* The error that caused the tool call to be invalid.
*/
error?: unknown;
};
/**
* Union of static and dynamic tool calls.
*/
type TypedToolCall<TOOLS extends ToolSet> = StaticToolCall<TOOLS> | DynamicToolCall;
/**
* Output part that indicates that a tool approval request has been made.
*
* The tool approval request can be approved or denied in the next tool message.
*/
type ToolApprovalRequestOutput<TOOLS extends ToolSet> = {
type: 'tool-approval-request';
/**
* ID of the tool approval request.
*/
approvalId: string;
/**
* Tool call that the approval request is for.
*/
toolCall: TypedToolCall<TOOLS>;
};
/**
* Reasoning output of a text generation. It contains a reasoning.
*/
interface ReasoningOutput {
type: 'reasoning';
/**
* The reasoning text.
*/
text: string;
/**
* Additional provider-specific metadata. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerMetadata?: ProviderMetadata;
}
/**
* Tool error for a tool that is statically declared in TOOLS;
* `input` is typed from the tool definition.
*/
type StaticToolError<TOOLS extends ToolSet> = ValueOf<{
[NAME in keyof TOOLS]: {
type: 'tool-error';
toolCallId: string;
toolName: NAME & string;
input: InferToolInput<TOOLS[NAME]>;
error: unknown;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic?: false | undefined;
title?: string;
};
}>;
/**
* Tool error for a dynamic tool call (`dynamic: true`); `input` is untyped.
*/
type DynamicToolError = {
type: 'tool-error';
toolCallId: string;
toolName: string;
input: unknown;
error: unknown;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic: true;
title?: string;
};
/**
* Union of static and dynamic tool errors.
*/
type TypedToolError<TOOLS extends ToolSet> = StaticToolError<TOOLS> | DynamicToolError;
/**
* Tool result for a tool that is statically declared in TOOLS;
* `input` and `output` are typed from the tool definition.
*/
type StaticToolResult<TOOLS extends ToolSet> = ValueOf<{
[NAME in keyof TOOLS]: {
type: 'tool-result';
toolCallId: string;
toolName: NAME & string;
input: InferToolInput<TOOLS[NAME]>;
output: InferToolOutput<TOOLS[NAME]>;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic?: false | undefined;
preliminary?: boolean;
title?: string;
};
}>;
/**
* Tool result for a dynamic tool call (`dynamic: true`);
* `input` and `output` are untyped.
*/
type DynamicToolResult = {
type: 'tool-result';
toolCallId: string;
toolName: string;
input: unknown;
output: unknown;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic: true;
preliminary?: boolean;
title?: string;
};
/**
* Union of static and dynamic tool results.
*/
type TypedToolResult<TOOLS extends ToolSet> = StaticToolResult<TOOLS> | DynamicToolResult;
/**
* A single part of generated content: text, reasoning, source, file,
* tool call, tool result, tool error, or tool approval request.
*/
type ContentPart<TOOLS extends ToolSet> = {
type: 'text';
text: string;
providerMetadata?: ProviderMetadata;
} | ReasoningOutput | ({
type: 'source';
} & Source) | {
type: 'file';
file: GeneratedFile;
providerMetadata?: ProviderMetadata;
} | ({
type: 'tool-call';
} & TypedToolCall<TOOLS> & {
providerMetadata?: ProviderMetadata;
}) | ({
type: 'tool-result';
} & TypedToolResult<TOOLS> & {
providerMetadata?: ProviderMetadata;
}) | ({
type: 'tool-error';
} & TypedToolError<TOOLS> & {
providerMetadata?: ProviderMetadata;
}) | ToolApprovalRequestOutput<TOOLS>;
/**
* Create a type from an object with all keys and nested keys set to optional.
* The helper supports normal objects and schemas (which are resolved automatically).
* It always recurses into arrays.
*
* Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
*/
type DeepPartial<T> = T extends FlexibleSchema ? DeepPartialInternal<InferSchema<T>> : DeepPartialInternal<T>;
/**
* Recursive worker for {@link DeepPartial}: primitives, functions, constructors,
* Date and RegExp pass through unchanged; Map/Set/array/object types recurse.
*/
type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
// NOTE(review): the `{} &` intersections below look like the type-fest trick for
// flattening how the type is displayed — presumably intentional; confirm before removing.
type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
/** Maps every property of ObjectType to an optional, deep-partial version. */
type PartialObject<ObjectType extends object> = {
[KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
};
/**
* Timeout configuration for API calls. Can be specified as:
* - A number representing milliseconds
* - An object with `totalMs` property for the total timeout in milliseconds
* - An object with `stepMs` property for the timeout of each step in milliseconds
* - An object with `chunkMs` property for the timeout between stream chunks (streaming only)
*/
type TimeoutConfiguration = number | {
totalMs?: number;
stepMs?: number;
chunkMs?: number;
};
/**
* Settings that control a model call: sampling parameters, output limits,
* retries, aborts/timeouts, and HTTP headers.
*/
type CallSettings = {
/**
* Maximum number of tokens to generate.
*/
maxOutputTokens?: number;
/**
* Temperature setting. The range depends on the provider and model.
*
* It is recommended to set either `temperature` or `topP`, but not both.
*/
temperature?: number;
/**
* Nucleus sampling. This is a number between 0 and 1.
*
* E.g. 0.1 would mean that only tokens with the top 10% probability mass
* are considered.
*
* It is recommended to set either `temperature` or `topP`, but not both.
*/
topP?: number;
/**
* Only sample from the top K options for each subsequent token.
*
* Used to remove "long tail" low probability responses.
* Recommended for advanced use cases only. You usually only need to use temperature.
*/
topK?: number;
/**
* Presence penalty setting. It affects the likelihood of the model to
* repeat information that is already in the prompt.
*
* The presence penalty is a number between -1 (increase repetition)
* and 1 (maximum penalty, decrease repetition). 0 means no penalty.
*/
presencePenalty?: number;
/**
* Frequency penalty setting. It affects the likelihood of the model
* to repeatedly use the same words or phrases.
*
* The frequency penalty is a number between -1 (increase repetition)
* and 1 (maximum penalty, decrease repetition). 0 means no penalty.
*/
frequencyPenalty?: number;
/**
* Stop sequences.
* If set, the model will stop generating text when one of the stop sequences is generated.
* Providers may have limits on the number of stop sequences.
*/
stopSequences?: string[];
/**
* The seed (integer) to use for random sampling. If set and supported
* by the model, calls will generate deterministic results.
*/
seed?: number;
/**
* Maximum number of retries. Set to 0 to disable retries.
*
* @default 2
*/
maxRetries?: number;
/**
* Abort signal.
*/
abortSignal?: AbortSignal;
/**
* Timeout in milliseconds. The call will be aborted if it takes longer
* than the specified timeout. Can be used alongside abortSignal.
*
* Can be specified as a number (milliseconds) or as an object with
* `totalMs`, `stepMs`, and/or `chunkMs` properties (see {@link TimeoutConfiguration}).
*/
timeout?: TimeoutConfiguration;
/**
* Additional HTTP headers to be sent with the request.
* Only applicable for HTTP-based providers.
*/
headers?: Record<string, string | undefined>;
};
/**
* Prompt part of the AI function options.
* It contains a system message, a simple text prompt, or a list of messages.
*/
type Prompt = {
/**
* System message to include in the prompt. Can be used with `prompt` or `messages`.
*/
system?: string | SystemModelMessage | Array<SystemModelMessage>;
} & ({
/**
* A prompt. It can be either a text prompt or a list of messages.
*
* You can either use `prompt` or `messages` but not both.
*/
prompt: string | Array<ModelMessage>;
/**
* A list of messages.
*
* You can either use `prompt` or `messages` but not both.
*/
messages?: never;
} | {
/**
* A list of messages.
*
* You can either use `prompt` or `messages` but not both.
*/
messages: Array<ModelMessage>;
/**
* A prompt. It can be either a text prompt or a list of messages.
*
* You can either use `prompt` or `messages` but not both.
*/
prompt?: never;
});
/**
* Telemetry configuration.
*/
type TelemetrySettings = {
/**
* Enable or disable telemetry. Disabled by default while experimental.
*/
isEnabled?: boolean;
/**
* Enable or disable input recording. Enabled by default.
*
* You might want to disable input recording to avoid recording sensitive
* information, to reduce data transfers, or to increase performance.
*/
recordInputs?: boolean;
/**
* Enable or disable output recording. Enabled by default.
*
* You might want to disable output recording to avoid recording sensitive
* information, to reduce data transfers, or to increase performance.
*/
recordOutputs?: boolean;
/**
* Identifier for this function. Used to group telemetry data by function.
*/
functionId?: string;
/**
* Additional information to include in the telemetry data.
*/
metadata?: Record<string, AttributeValue>;
/**
* A custom tracer to use for the telemetry data.
*/
tracer?: Tracer;
};
/**
* Experimental. Can change in patch versions without warning.
*
* Download function. Called with the array of URLs and a boolean indicating
* whether the URL is supported by the model.
*
* The download function can decide for each URL:
* - to return null (which means that the URL should be passed to the model)
* - to download the asset and return the data (incl. retries, authentication, etc.)
*
* Should throw DownloadError if the download fails.
*
* Should return an array of entries sorted by the order of the requested downloads.
* If the asset was downloaded, the entry is an object whose `data` holds the
* downloaded bytes (Uint8Array) and whose `mediaType` is the media type of the asset.
* An entry should be `null` if the corresponding URL should be passed through as is.
*/
type DownloadFunction = (options: Array<{
url: URL;
isUrlSupportedByModel: boolean;
}>) => PromiseLike<Array<{
data: Uint8Array;
mediaType: string | undefined;
} | null>>;
/**
* A message that was generated during the generation process.
* It can be either an assistant message or a tool message.
*/
type ResponseMessage = AssistantModelMessage | ToolModelMessage;
/**
* The result of a single step in the generation process.
*/
type StepResult<TOOLS extends ToolSet> = {
/**
* The content that was generated in the last step.
*/
readonly content: Array<ContentPart<TOOLS>>;
/**
* The generated text.
*/
readonly text: string;
/**
* The reasoning that was generated during the generation.
*/
readonly reasoning: Array<ReasoningPart>;
/**
* The reasoning text that was generated during the generation.
*/
readonly reasoningText: string | undefined;
/**
* The files that were generated during the generation.
*/
readonly files: Array<GeneratedFile>;
/**
* The sources that were used to generate the text.
*/
readonly sources: Array<Source>;
/**
* The tool calls that were made during the generation.
*/
readonly toolCalls: Array<TypedToolCall<TOOLS>>;
/**
* The static tool calls that were made in the last step.
*/
readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
/**
* The dynamic tool calls that were made in the last step.
*/
readonly dynamicToolCalls: Array<DynamicToolCall>;
/**
* The results of the tool calls.
*/
readonly toolResults: Array<TypedToolResult<TOOLS>>;
/**
* The static tool results that were made in the last step.
*/
readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
/**
* The dynamic tool results that were made in the last step.
*/
readonly dynamicToolResults: Array<DynamicToolResult>;
/**
* The unified reason why the generation finished. See {@link FinishReason}.
*/
readonly finishReason: FinishReason;
/**
* The raw reason why the generation finished (from the provider).
*/
readonly rawFinishReason: string | undefined;
/**
* The token usage of the generated text.
*/
readonly usage: LanguageModelUsage;
/**
* Warnings from the model provider (e.g. unsupported settings).
*/
readonly warnings: CallWarning[] | undefined;
/**
* Additional request information.
*/
readonly request: LanguageModelRequestMetadata;
/**
* Additional response information.
*/
readonly response: LanguageModelResponseMetadata & {
/**
* The response messages that were generated during the call.
* Response messages can be either assistant messages or tool messages.
* They contain a generated id.
*/
readonly messages: Array<ResponseMessage>;
/**
* Response body (available only for providers that use HTTP requests).
*/
body?: unknown;
};
/**
* Additional provider-specific metadata. They are passed through
* from the provider to the AI SDK and enable provider-specific
* results that can be fully encapsulated in the provider.
*/
readonly providerMetadata: ProviderMetadata | undefined;
};
/**
* Function that you can use to provide different settings for a step.
*
* @param options - The options for the step.
* @param options.steps - The steps that have been executed so far.
* @param options.stepNumber - The number of the step that is being executed.
* @param options.model - The model that is being used.
* @param options.messages - The messages that will be sent to the model for the current step.
* @param options.experimental_context - The context passed via the experimental_context setting (experimental).
*
* @returns An object that contains the settings for the step.
* If you return undefined (or for undefined settings), the settings from the outer level will be used.
*/
type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
/**
* The steps that have been executed so far.
*/
steps: Array<StepResult<NoInfer<TOOLS>>>;
/**
* The number of the step that is being executed.
*/
stepNumber: number;
/**
* The model instance that is being used for this step.
*/
model: LanguageModel;
/**
* The messages that will be sent to the model for the current step.
*/
messages: Array<ModelMessage>;
/**
* The context passed via the experimental_context setting (experimental).
*/
experimental_context: unknown;
}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
/**
* The result type returned by a {@link PrepareStepFunction},
* allowing per-step overrides of model, tools, or messages.
*/
type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
/**
* Optionally override which LanguageModel instance is used for this step.
*/
model?: LanguageModel;
/**
* Optionally set which tool the model must call, or provide tool call configuration
* for this step.
*/
toolChoice?: ToolChoice<NoInfer<TOOLS>>;
/**
* If provided, only these tools are enabled/available for this step.
*/
activeTools?: Array<keyof NoInfer<TOOLS>>;
/**
* Optionally override the system message(s) sent to the model for this step.
*/
system?: string | SystemModelMessage | Array<SystemModelMessage>;
/**
* Optionally override the full set of messages sent to the model
* for this step.
*/
messages?: Array<ModelMessage>;
/**
* Context that is passed into tool execution. Experimental.
*
* Changing the context will affect the context in this step
* and all subsequent steps.
*/
experimental_context?: unknown;
/**
* Additional provider-specific options for this step.
*
* Can be used to pass provider-specific configuration such as
* container IDs for Anthropic's code execution.
*/
providerOptions?: ProviderOptions;
} | undefined;
/**
* Condition that decides whether a multi-step generation should stop.
* Receives the steps that have been executed so far.
*/
type StopCondition<TOOLS extends ToolSet> = (options: {
steps: Array<StepResult<TOOLS>>;
}) => PromiseLike<boolean> | boolean;
/**
* Creates a stop condition based on the number of executed steps
* (stops once `stepCount` steps have run).
*/
declare function stepCountIs(stepCount: number): StopCondition<any>;
/**
* Creates a stop condition based on a call to the tool named `toolName`.
*/
declare function hasToolCall(toolName: string): StopCondition<any>;
// Unique brand symbol for InvalidToolInputError (likely backs the isInstance check).
declare const symbol$j: unique symbol;
/**
* Error for a tool call whose input was invalid, e.g. because it failed to parse
* (see {@link ToolCallRepairFunction}, where this error is surfaced).
*/
declare class InvalidToolInputError extends AISDKError {
private readonly [symbol$j];
/** Name of the tool whose input was invalid. */
readonly toolName: string;
/** The raw tool input (as text) that was invalid. */
readonly toolInput: string;
constructor({ toolInput, toolName, cause, message, }: {
message?: string;
toolInput: string;
toolName: string;
cause: unknown;
});
/** Checks whether `error` is an InvalidToolInputError. */
static isInstance(error: unknown): error is InvalidToolInputError;
}
// Unique brand symbol for NoSuchToolError (likely backs the isInstance check).
declare const symbol$i: unique symbol;
/**
* Error for a tool call that references a tool that is not available
* (see {@link ToolCallRepairFunction}, where this error is surfaced).
*/
declare class NoSuchToolError extends AISDKError {
private readonly [symbol$i];
/** Name of the tool that was called but is not available. */
readonly toolName: string;
/** The names of the tools that were available, if known. */
readonly availableTools: string[] | undefined;
constructor({ toolName, availableTools, message, }: {
toolName: string;
availableTools?: string[] | undefined;
message?: string;
});
/** Checks whether `error` is a NoSuchToolError. */
static isInstance(error: unknown): error is NoSuchToolError;
}
/** Zod schema for validating {@link SystemModelMessage} values. */
declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
/** Zod schema for validating {@link UserModelMessage} values. */
declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
/** Zod schema for validating {@link AssistantModelMessage} values. */
declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
/** Zod schema for validating {@link ToolModelMessage} values. */
declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
/** Zod schema for validating {@link ModelMessage} values. */
declare const modelMessageSchema: z.ZodType<ModelMessage>;
/**
* A function that attempts to repair a tool call that failed to parse.
*
* It receives the error and the context as arguments and returns the repair
* tool call JSON as text.
*
* @param options.system - The system prompt.
* @param options.messages - The messages in the current generation step.
* @param options.toolCall - The tool call that failed to parse.
* @param options.tools - The tools that are available.
* @param options.inputSchema - A function that returns the JSON Schema for a tool.
* @param options.error - The error that occurred while parsing the tool call.
*/
type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
messages: ModelMessage[];
toolCall: LanguageModelV3ToolCall;
tools: TOOLS;
inputSchema: (options: {
toolName: string;
}) => PromiseLike<JSONSchema7>;
error: NoSuchToolError | InvalidToolInputError;
}) => Promise<LanguageModelV3ToolCall | null>;
/**
* Callback that is set using the `onStepFinish` option.
*
* @param stepResult - The result of the step.
*/
type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
/**
* Callback that is set using the `onFinish` option.
* Receives the final step result extended with totals across all steps.
*
* @param event - The event that is passed to the callback.
*/
type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
/**
* Details for all steps.
*/
readonly steps: StepResult<TOOLS>[];
/**
* Total usage for all steps. This is the sum of the usage of all steps.
*/
readonly totalUsage: LanguageModelUsage;
/**
* Context that is passed into tool execution.
*
* Experimental (can break in patch releases).
*
* @default undefined
*/
experimental_context: unknown;
}) => PromiseLike<void> | void;
/**
 * Generate a text and call tools for a given prompt using a language model.
 *
 * This function does not stream the output. If you want to stream the output, use `streamText` instead.
 *
 * @param model - The language model to use.
 *
 * @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
 * @param toolChoice - The tool choice strategy. Default: 'auto'.
 *
 * @param system - A system message that will be part of the prompt.
 * @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
 * @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
 *
 * @param maxOutputTokens - Maximum number of tokens to generate.
 * @param temperature - Temperature setting.
 * The value is passed through to the provider. The range depends on the provider and model.
 * It is recommended to set either `temperature` or `topP`, but not both.
 * @param topP - Nucleus sampling.
 * The value is passed through to the provider. The range depends on the provider and model.
 * It is recommended to set either `temperature` or `topP`, but not both.
 * @param topK - Only sample from the top K options for each subsequent token.
 * Used to remove "long tail" low probability responses.
 * Recommended for advanced use cases only. You usually only need to use temperature.
 * @param presencePenalty - Presence penalty setting.
 * It affects the likelihood of the model to repeat information that is already in the prompt.
 * The value is passed through to the provider. The range depends on the provider and model.
 * @param frequencyPenalty - Frequency penalty setting.
 * It affects the likelihood of the model to repeatedly use the same words or phrases.
 * The value is passed through to the provider. The range depends on the provider and model.
 * @param stopSequences - Stop sequences.
 * If set, the model will stop generating text when one of the stop sequences is generated.
 * @param seed - The seed (integer) to use for random sampling.
 * If set and supported by the model, calls will generate deterministic results.
 *
 * @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 * @param abortSignal - An optional abort signal that can be used to cancel the call.
 * @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
 * @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 *
 * @param stopWhen - Condition for stopping the generation when there are tool results in the last step. Default: stepCountIs(1).
 *
 * @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
 * @param onFinish - Callback that is called when all steps are finished and the response is complete.
 *
 * @returns
 * A result object that contains the generated text, the results of the tool calls, and additional information.
 */
declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, experimental_include: include, _internal: { generateId }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
/**
 * The language model to use.
 */
model: LanguageModel;
/**
 * The tools that the model can call. The model needs to support calling tools.
 */
tools?: TOOLS;
/**
 * The tool choice strategy. Default: 'auto'.
 */
toolChoice?: ToolChoice<NoInfer<TOOLS>>;
/**
 * Condition for stopping the generation when there are tool results in the last step.
 * When the condition is an array, any of the conditions can be met to stop the generation.
 *
 * @default stepCountIs(1)
 */
stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
/**
 * Optional telemetry configuration (experimental).
 */
experimental_telemetry?: TelemetrySettings;
/**
 * Additional provider-specific options. They are passed through
 * to the provider from the AI SDK and enable provider-specific
 * functionality that can be fully encapsulated in the provider.
 */
providerOptions?: ProviderOptions;
/**
 * @deprecated Use `activeTools` instead.
 */
experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
/**
 * Limits the tools that are available for the model to call without
 * changing the tool call and result types in the result.
 */
activeTools?: Array<keyof NoInfer<TOOLS>>;
/**
 * Optional specification for parsing structured outputs from the LLM response.
 */
output?: OUTPUT;
/**
 * Optional specification for parsing structured outputs from the LLM response.
 *
 * @deprecated Use `output` instead.
 */
experimental_output?: OUTPUT;
/**
 * Custom download function to use for URLs.
 *
 * By default, files are downloaded if the model does not support the URL for the given media type.
 */
experimental_download?: DownloadFunction | undefined;
/**
 * @deprecated Use `prepareStep` instead.
 */
experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
/**
 * Optional function that you can use to provide different settings for a step.
 */
prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
/**
 * A function that attempts to repair a tool call that failed to parse.
 */
experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
/**
 * Callback that is called when each step (LLM call) is finished, including intermediate steps.
 */
onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
/**
 * Callback that is called when all steps are finished and the response is complete.
 */
onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
/**
 * Context that is passed into tool execution.
 *
 * Experimental (can break in patch releases).
 *
 * @default undefined
 */
experimental_context?: unknown;
/**
 * Settings for controlling what data is included in step results.
 * Disabling inclusion can help reduce memory usage when processing
 * large payloads like images.
 *
 * By default, all data is included for backwards compatibility.
 */
experimental_include?: {
/**
 * Whether to retain the request body in step results.
 * The request body can be large when sending images or files.
 * @default true
 */
requestBody?: boolean;
/**
 * Whether to retain the response body in step results.
 * @default true
 */
responseBody?: boolean;
};
/**
 * Internal. For test use only. May change without notice.
 */
_internal?: {
generateId?: IdGenerator;
};
}): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
/**
 * Infers the complete output type from the output specification.
 * Resolves to `never` when `OUTPUT` is not an `Output` instantiation.
 */
type InferCompleteOutput<OUTPUT extends Output> = OUTPUT extends Output<infer COMPLETE_OUTPUT, any, any> ? COMPLETE_OUTPUT : never;
/**
 * Infers the partial output type from the output specification.
 * Resolves to `never` when `OUTPUT` is not an `Output` instantiation.
 */
type InferPartialOutput<OUTPUT extends Output> = OUTPUT extends Output<any, infer PARTIAL_OUTPUT, any> ? PARTIAL_OUTPUT : never;
/**
 * Infers the element type from an array output specification.
 * Resolves to `never` when `OUTPUT` is not an `Output` instantiation.
 */
type InferElementOutput<OUTPUT extends Output> = OUTPUT extends Output<any, any, infer ELEMENT> ? ELEMENT : never;
/**
 * Prunes model messages from a list of model messages.
 *
 * @param messages - The list of model messages to prune.
 * @param reasoning - How to remove reasoning content from assistant messages. Default is `'none'`.
 * @param toolCalls - How to prune tool call/results/approval content. Either a single mode,
 * or an array of rules where each rule carries a mode and an optional list of tool names
 * it applies to. Default is `[]`.
 * @param emptyMessages - Whether to keep or remove messages whose content is empty after pruning. Default is `'remove'`.
 *
 * @returns The pruned list of model messages.
 */
declare function pruneMessages({ messages, reasoning, toolCalls, emptyMessages, }: {
messages: ModelMessage[];
reasoning?: 'all' | 'before-last-message' | 'none';
toolCalls?: 'all' | 'before-last-message' | `before-last-${number}-messages` | 'none' | Array<{
type: 'all' | 'before-last-message' | `before-last-${number}-messages`;
tools?: string[];
}>;
emptyMessages?: 'keep' | 'remove';
}): ModelMessage[];
/**
 * Detects the first chunk in a buffer.
 *
 * @param buffer - The buffer to detect the first chunk in.
 *
 * @returns The first detected chunk, or `undefined`/`null` if no chunk was detected.
 */
type ChunkDetector = (buffer: string) => string | undefined | null;
/**
 * Smooths text and reasoning streaming output.
 *
 * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
 * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, provide a custom RegExp pattern for custom chunking, provide an Intl.Segmenter for locale-aware word segmentation (recommended for CJK languages), or provide a custom ChunkDetector function.
 *
 * @returns A function that, given the tool set, produces a transform stream
 * over `TextStreamPart`s that smooths text streaming output.
 */
declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
delayInMs?: number | null;
chunking?: 'word' | 'line' | RegExp | ChunkDetector | Intl.Segmenter;
/**
 * Internal. For test use only. May change without notice.
 */
_internal?: {
delay?: (delayInMs: number | null) => Promise<void>;
};
}): (options: {
tools: TOOLS;
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
/**
 * Tool output when the tool execution has been denied (for static tools).
 * A union over every tool name in the tool set, so `toolName` is typed
 * as the specific tool's name.
 */
type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
[NAME in keyof TOOLS]: {
type: 'tool-output-denied';
toolCallId: string;
toolName: NAME & string;
providerExecuted?: boolean;
dynamic?: false | undefined;
};
}>;
/**
 * Tool output when the tool execution has been denied.
 * Currently an alias for `StaticToolOutputDenied`.
 */
type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
/**
 * The data types that can be used in the UI message for the UI message data parts.
 */
type UIDataTypes = Record<string, unknown>;
/**
 * Minimal shape of a tool as seen by the UI: its input type and its
 * (possibly undefined) output type.
 */
type UITool = {
input: unknown;
output: unknown | undefined;
};
/**
 * Infer the input and output types of a tool so it can be used as a UI tool.
 */
type InferUITool<TOOL extends Tool> = {
input: InferToolInput<TOOL>;
output: InferToolOutput<TOOL>;
};
/**
 * Infer the input and output types of a tool set so it can be used as a UI tool set.
 */
type InferUITools<TOOLS extends ToolSet> = {
[NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
};
/**
 * A set of UI tools, keyed by tool name.
 */
type UITools = Record<string, UITool>;
/**
 * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
 *
 * @typeParam METADATA - Type of the optional message metadata.
 * @typeParam DATA_PARTS - Data types available for data parts.
 * @typeParam TOOLS - UI tools available for tool parts.
 */
interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
/**
 * A unique identifier for the message.
 */
id: string;
/**
 * The role of the message.
 */
role: 'system' | 'user' | 'assistant';
/**
 * The metadata of the message.
 */
metadata?: METADATA;
/**
 * The parts of the message. Use this for rendering the message in the UI.
 *
 * System messages should be avoided (set the system prompt on the server instead).
 * They can have text parts.
 *
 * User messages can have text parts and file parts.
 *
 * Assistant messages can have text, reasoning, tool invocation, and file parts.
 */
parts: Array<UIMessagePart<DATA_PARTS, TOOLS>>;
}
/**
 * Union of all part types that can appear in `UIMessage.parts`.
 */
type UIMessagePart<DATA_TYPES extends UIDataTypes, TOOLS extends UITools> = TextUIPart | ReasoningUIPart | ToolUIPart<TOOLS> | DynamicToolUIPart | SourceUrlUIPart | SourceDocumentUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
/**
 * A text part of a message.
 */
type TextUIPart = {
type: 'text';
/**
 * The text content.
 */
text: string;
/**
 * The state of the text part (`'streaming'` or `'done'`).
 */
state?: 'streaming' | 'done';
/**
 * The provider metadata.
 */
providerMetadata?: ProviderMetadata;
};
/**
 * A reasoning part of a message.
 */
type ReasoningUIPart = {
type: 'reasoning';
/**
 * The reasoning text.
 */
text: string;
/**
 * The state of the reasoning part (`'streaming'` or `'done'`).
 */
state?: 'streaming' | 'done';
/**
 * The provider metadata.
 */
providerMetadata?: ProviderMetadata;
};
/**
 * A source part of a message (a URL source).
 */
type SourceUrlUIPart = {
type: 'source-url';
/** Identifier of the source. */
sourceId: string;
/** URL of the source. */
url: string;
/** Optional title of the source. */
title?: string;
/** The provider metadata. */
providerMetadata?: ProviderMetadata;
};
/**
 * A document source part of a message.
 */
type SourceDocumentUIPart = {
type: 'source-document';
/** Identifier of the source. */
sourceId: string;
/** IANA media type of the document. */
mediaType: string;
/** Title of the document. */
title: string;
/** Optional filename of the document. */
filename?: string;
/** The provider metadata. */
providerMetadata?: ProviderMetadata;
};
/**
 * A file part of a message.
 */
type FileUIPart = {
type: 'file';
/**
 * IANA media type of the file.
 *
 * @see https://www.iana.org/assignments/media-types/media-types.xhtml
 */
mediaType: string;
/**
 * Optional filename of the file.
 */
filename?: string;
/**
 * The URL of the file.
 * It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
 */
url: string;
/**
 * The provider metadata.
 */
providerMetadata?: ProviderMetadata;
};
/**
 * A step boundary part of a message. Marks the start of a step; it carries
 * no payload beyond its `type`.
 */
type StepStartUIPart = {
type: 'step-start';
};
/**
 * A data part of a message. The `type` is the literal `data-` prefix followed
 * by the data type name, and `data` carries the corresponding value.
 */
type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
[NAME in keyof DATA_TYPES & string]: {
type: `data-${NAME}`;
id?: string;
data: DATA_TYPES[NAME];
};
}>;
/**
 * Normalizes a tool to its UI tool form: a `Tool` is converted via
 * `InferUITool`, while an existing `UITool` is passed through unchanged.
 */
type asUITool<TOOL extends UITool | Tool> = TOOL extends Tool ? InferUITool<TOOL> : TOOL;
/**
 * Check if a message part is a data part.
 * Narrows the part to `DataUIPart` (whose `type` has the `data-` prefix).
 */
declare function isDataUIPart<DATA_TYPES extends UIDataTypes>(part: UIMessagePart<DATA_TYPES, UITools>): part is DataUIPart<DATA_TYPES>;
/**
* A UI tool invocation contains all the information needed to render a tool invocation in the UI.
* It can be derived from a tool without knowing the tool name, and can be