/**
 * ai — AI SDK by Vercel: The AI Toolkit for TypeScript and JavaScript.
 * Type declarations (1,369 lines / 1,326 loc, 248 kB, text/typescript).
 */
import { GatewayModelId } from '@ai-sdk/gateway';
export { GatewayModelId, createGateway, gateway } from '@ai-sdk/gateway';
import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, SystemModelMessage, ModelMessage, AssistantModelMessage, ToolModelMessage, ReasoningPart, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
export { AssistantContent, AssistantModelMessage, DataContent, DownloadError, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
import * as _ai_sdk_provider from '@ai-sdk/provider';
import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, ImageModelV3Middleware, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3ToolChoice, AISDKError, LanguageModelV3ToolCall, JSONSchema7, LanguageModelV3CallOptions, JSONParseError, TypeValidationError, Experimental_VideoModelV3, EmbeddingModelV3CallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
import { AttributeValue, Tracer } from '@opentelemetry/api';
import { ServerResponse } from 'node:http';
import { ServerResponse as ServerResponse$1 } from 'http';
import { z } from 'zod/v4';
/**
 * Embedding model that is used by the AI SDK.
 */
type EmbeddingModel = string | EmbeddingModelV3 | EmbeddingModelV2<string>;
/**
 * Embedding.
 */
type Embedding = EmbeddingModelV3Embedding;
/**
 * Middleware for wrapping embedding models. Alias of the V3 middleware type.
 */
type EmbeddingModelMiddleware = EmbeddingModelV3Middleware;
/**
 * Image model that is used by the AI SDK.
 */
type ImageModel = string | ImageModelV3 | ImageModelV2;
/**
 * Metadata from the model provider for this call.
 */
type ImageModelProviderMetadata = ImageModelV3ProviderMetadata | ImageModelV2ProviderMetadata;
/**
 * Middleware for wrapping image models. Alias of the V3 middleware type.
 */
type ImageModelMiddleware = ImageModelV3Middleware;
/**
 * Metadata about the response of an image model call.
 */
type ImageModelResponseMetadata = {
/**
 * Timestamp for the start of the generated response.
 */
timestamp: Date;
/**
 * The ID of the response model that was used to generate the response.
 */
modelId: string;
/**
 * Response headers.
 */
headers?: Record<string, string>;
};
/**
 * Any JSON value. Alias of the provider-level JSON value type.
 */
type JSONValue = JSONValue$1;
declare global {
/**
 * Global interface that can be augmented by third-party packages to register custom model IDs.
 *
 * You can register model IDs in two ways:
 *
 * 1. Register based on Model IDs from a provider package:
 * @example
 * ```typescript
 * import { openai } from '@ai-sdk/openai';
 * type OpenAIResponsesModelId = Parameters<typeof openai>[0];
 *
 * declare global {
 * interface RegisteredProviderModels {
 * openai: OpenAIResponsesModelId;
 * }
 * }
 * ```
 *
 * 2. Register individual model IDs directly as keys:
 * @example
 * ```typescript
 * declare global {
 * interface RegisteredProviderModels {
 * 'my-provider:my-model': any;
 * 'my-provider:another-model': any;
 * }
 * }
 * ```
 */
interface RegisteredProviderModels {
}
}
/**
 * Global provider model ID type that defaults to GatewayModelId but can be augmented
 * by third-party packages via declaration merging.
 *
 * The `[keyof ...] extends [never]` tuple wrapping prevents distribution over the
 * union, so the conditional tests "is the interface empty?" as a whole rather than
 * per member; an empty interface falls back to `GatewayModelId`.
 */
type GlobalProviderModelId = [keyof RegisteredProviderModels] extends [
never
] ? GatewayModelId : keyof RegisteredProviderModels | RegisteredProviderModels[keyof RegisteredProviderModels];
/**
 * Language model that is used by the AI SDK.
 */
type LanguageModel = GlobalProviderModelId | LanguageModelV3 | LanguageModelV2;
/**
 * Reason why a language model finished generating a response.
 *
 * Can be one of the following:
 * - `stop`: model generated stop sequence
 * - `length`: model generated maximum number of tokens
 * - `content-filter`: content filter violation stopped the model
 * - `tool-calls`: model triggered tool calls
 * - `error`: model stopped because of an error
 * - `other`: model stopped for other reasons
 */
type FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
/**
 * Warning from the model provider for this call. The call will proceed, but e.g.
 * some settings might not be supported, which can lead to suboptimal results.
 */
type CallWarning = SharedV3Warning;
/**
 * A source that has been used as input to generate the response.
 */
type Source = LanguageModelV3Source;
/**
 * Tool choice for the generation. It supports the following settings:
 *
 * - `auto` (default): the model can choose whether and which tools to call.
 * - `required`: the model must call a tool. It can choose which tool to call.
 * - `none`: the model must not call tools
 * - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
 */
type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
type: 'tool';
/** Name of the tool the model must call; constrained to the keys of TOOLS. */
toolName: Extract<keyof TOOLS, string>;
};
/**
 * Middleware for wrapping language models. Alias of the V3 middleware type.
 */
type LanguageModelMiddleware = LanguageModelV3Middleware;
/**
 * Metadata about the request that was sent to the provider.
 */
type LanguageModelRequestMetadata = {
/**
 * Request HTTP body that was sent to the provider API.
 */
body?: unknown;
};
/**
 * Metadata about the response of a language model call.
 */
type LanguageModelResponseMetadata = {
/**
 * ID for the generated response.
 */
id: string;
/**
 * Timestamp for the start of the generated response.
 */
timestamp: Date;
/**
 * The ID of the response model that was used to generate the response.
 */
modelId: string;
/**
 * Response headers (available only for providers that use HTTP requests).
 */
headers?: Record<string, string>;
};
/**
 * Reranking model that is used by the AI SDK.
 */
type RerankingModel = RerankingModelV3;
/**
 * Provider for language, text embedding, and image models.
 */
type Provider = {
/**
 * Returns the language model with the given id.
 * The model id is then passed to the provider function to get the model.
 *
 * @param {string} modelId - The id of the model to return.
 *
 * @returns {LanguageModel} The language model associated with the id
 *
 * @throws {NoSuchModelError} If no such model exists.
 */
languageModel(modelId: string): LanguageModel;
/**
 * Returns the text embedding model with the given id.
 * The model id is then passed to the provider function to get the model.
 *
 * @param {string} modelId - The id of the model to return.
 *
 * @returns {EmbeddingModel} The embedding model associated with the id
 *
 * @throws {NoSuchModelError} If no such model exists.
 */
embeddingModel(modelId: string): EmbeddingModel;
/**
 * Returns the image model with the given id.
 * The model id is then passed to the provider function to get the model.
 *
 * @param {string} modelId - The id of the model to return.
 *
 * @returns {ImageModel} The image model associated with the id
 *
 * NOTE(review): unlike the other getters, no @throws is documented here —
 * confirm whether imageModel can also throw NoSuchModelError.
 */
imageModel(modelId: string): ImageModel;
/**
 * Returns the reranking model with the given id.
 * The model id is then passed to the provider function to get the model.
 *
 * @param {string} modelId - The id of the model to return.
 *
 * @returns {RerankingModel} The reranking model associated with the id
 *
 * @throws {NoSuchModelError} If no such model exists.
 */
rerankingModel(modelId: string): RerankingModel;
};
/**
 * Additional provider-specific metadata that is returned from the provider.
 *
 * This is needed to enable provider-specific functionality that can be
 * fully encapsulated in the provider.
 */
type ProviderMetadata = SharedV3ProviderMetadata;
/**
 * Speech model that is used by the AI SDK.
 */
type SpeechModel = string | SpeechModelV3 | SpeechModelV2;
/**
 * Metadata about the response of a speech model call.
 */
type SpeechModelResponseMetadata = {
/**
 * Timestamp for the start of the generated response.
 */
timestamp: Date;
/**
 * The ID of the response model that was used to generate the response.
 */
modelId: string;
/**
 * Response headers.
 */
headers?: Record<string, string>;
/**
 * Response body.
 */
body?: unknown;
};
/**
 * Transcription model that is used by the AI SDK.
 */
type TranscriptionModel = string | TranscriptionModelV3 | TranscriptionModelV2;
/**
 * Metadata about the response of a transcription model call.
 */
type TranscriptionModelResponseMetadata = {
/**
 * Timestamp for the start of the generated response.
 */
timestamp: Date;
/**
 * The ID of the response model that was used to generate the response.
 */
modelId: string;
/**
 * Response headers.
 */
headers?: Record<string, string>;
};
/**
 * Represents the number of tokens used in a prompt and completion.
 */
type LanguageModelUsage = {
/**
 * The total number of input (prompt) tokens used.
 */
inputTokens: number | undefined;
/**
 * Detailed information about the input tokens.
 */
inputTokenDetails: {
/**
 * The number of non-cached input (prompt) tokens used.
 */
noCacheTokens: number | undefined;
/**
 * The number of cached input (prompt) tokens read.
 */
cacheReadTokens: number | undefined;
/**
 * The number of cached input (prompt) tokens written.
 */
cacheWriteTokens: number | undefined;
};
/**
 * The number of total output (completion) tokens used.
 */
outputTokens: number | undefined;
/**
 * Detailed information about the output tokens.
 */
outputTokenDetails: {
/**
 * The number of text tokens used.
 */
textTokens: number | undefined;
/**
 * The number of reasoning tokens used.
 */
reasoningTokens: number | undefined;
};
/**
 * The total number of tokens used.
 *
 * NOTE(review): providers may count this differently, so it is not guaranteed
 * to equal inputTokens + outputTokens — confirm per provider.
 */
totalTokens: number | undefined;
/**
 * @deprecated Use outputTokenDetails.reasoningTokens instead.
 */
reasoningTokens?: number | undefined;
/**
 * @deprecated Use inputTokenDetails.cacheReadTokens instead.
 */
cachedInputTokens?: number | undefined;
/**
 * Raw usage information from the provider.
 *
 * This is the usage information in the shape that the provider returns.
 * It can include additional information that is not part of the standard usage information.
 */
raw?: JSONObject;
};
/**
 * Represents the number of tokens used in an embedding.
 */
type EmbeddingModelUsage = {
/**
 * The number of tokens used in the embedding.
 */
tokens: number;
};
/**
 * Usage information for an image model call.
 */
type ImageModelUsage = ImageModelV3Usage;
/**
 * Warning from the model provider for this call. The call will proceed, but e.g.
 * some settings might not be supported, which can lead to suboptimal results.
 */
type Warning = SharedV3Warning;
/**
 * A generated file.
 */
interface GeneratedFile {
/**
 * File as a base64 encoded string.
 */
readonly base64: string;
/**
 * File as a Uint8Array.
 */
readonly uint8Array: Uint8Array;
/**
 * The IANA media type of the file.
 *
 * @see https://www.iana.org/assignments/media-types/media-types.xhtml
 */
readonly mediaType: string;
}
/**
 * Default implementation of {@link GeneratedFile}.
 *
 * Accepts the file data either as a base64 string or as a Uint8Array and
 * exposes both representations via lazy getters (the private fields cache
 * the corresponding encodings).
 */
declare class DefaultGeneratedFile implements GeneratedFile {
private base64Data;
private uint8ArrayData;
readonly mediaType: string;
constructor({ data, mediaType, }: {
data: string | Uint8Array;
mediaType: string;
});
get base64(): string;
get uint8Array(): Uint8Array<ArrayBufferLike>;
}
/**
 * Create a union of the given object's values, and optionally specify which keys to get the values from.
 *
 * Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
 *
 * @example
 * ```
 * // data.json
 * {
 * 'foo': 1,
 * 'bar': 2,
 * 'biz': 3
 * }
 *
 * // main.ts
 * import type {ValueOf} from 'type-fest';
 * import data = require('./data.json');
 *
 * export function getData(name: string): ValueOf<typeof data> {
 * return data[name];
 * }
 *
 * export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
 * return data[name];
 * }
 *
 * // file.ts
 * import {getData, onlyBar} from './main';
 *
 * getData('foo');
 * //=> 1
 *
 * onlyBar('foo');
 * //=> TypeError ...
 *
 * onlyBar('bar');
 * //=> 2
 * ```
 * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
 */
type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
/**
 * A set of tools available for a generation, keyed by tool name.
 * The union of Tool variants covers all input/output parameterizations.
 */
type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta' | 'needsApproval'>>;
/**
 * Properties shared by all tool call variants.
 */
type BaseToolCall = {
type: 'tool-call';
/** Unique identifier of this tool call. */
toolCallId: string;
/** True if the tool was executed by the provider rather than by the SDK. */
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
};
/**
 * A tool call whose tool is statically known from the TOOLS set,
 * so its `input` is typed from the tool's parameter schema.
 */
type StaticToolCall<TOOLS extends ToolSet> = ValueOf<{
[NAME in keyof TOOLS]: BaseToolCall & {
toolName: NAME & string;
input: TOOLS[NAME] extends Tool<infer PARAMETERS> ? PARAMETERS : never;
dynamic?: false | undefined;
invalid?: false | undefined;
error?: never;
title?: string;
};
}>;
/**
 * A tool call that is not statically known (e.g. a provider-defined or
 * runtime-registered tool), so its `input` is untyped.
 */
type DynamicToolCall = BaseToolCall & {
toolName: string;
input: unknown;
dynamic: true;
title?: string;
/**
 * True if this is caused by an unparsable tool call or
 * a tool that does not exist.
 */
invalid?: boolean;
/**
 * The error that caused the tool call to be invalid.
 */
error?: unknown;
};
/**
 * Union of static (schema-typed) and dynamic tool calls.
 */
type TypedToolCall<TOOLS extends ToolSet> = StaticToolCall<TOOLS> | DynamicToolCall;
/**
 * Output part that indicates that a tool approval request has been made.
 *
 * The tool approval request can be approved or denied in the next tool message.
 */
type ToolApprovalRequestOutput<TOOLS extends ToolSet> = {
type: 'tool-approval-request';
/**
 * ID of the tool approval request.
 */
approvalId: string;
/**
 * Tool call that the approval request is for.
 */
toolCall: TypedToolCall<TOOLS>;
};
/**
 * Reasoning output of a text generation. It contains a reasoning.
 */
interface ReasoningOutput {
type: 'reasoning';
/**
 * The reasoning text.
 */
text: string;
/**
 * Additional provider-specific metadata. They are passed through
 * to the provider from the AI SDK and enable provider-specific
 * functionality that can be fully encapsulated in the provider.
 */
providerMetadata?: ProviderMetadata;
}
/**
 * A tool error for a statically known tool: the `input` is typed
 * from the tool's schema in TOOLS.
 */
type StaticToolError<TOOLS extends ToolSet> = ValueOf<{
[NAME in keyof TOOLS]: {
type: 'tool-error';
toolCallId: string;
toolName: NAME & string;
input: InferToolInput<TOOLS[NAME]>;
/** The error thrown (or returned) by the tool execution. */
error: unknown;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic?: false | undefined;
title?: string;
};
}>;
/**
 * A tool error for a dynamic (not statically known) tool; `input` is untyped.
 */
type DynamicToolError = {
type: 'tool-error';
toolCallId: string;
toolName: string;
input: unknown;
error: unknown;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic: true;
title?: string;
};
/**
 * Union of static and dynamic tool errors.
 */
type TypedToolError<TOOLS extends ToolSet> = StaticToolError<TOOLS> | DynamicToolError;
/**
 * A tool result for a statically known tool: `input` and `output`
 * are typed from the tool's schema in TOOLS.
 */
type StaticToolResult<TOOLS extends ToolSet> = ValueOf<{
[NAME in keyof TOOLS]: {
type: 'tool-result';
toolCallId: string;
toolName: NAME & string;
input: InferToolInput<TOOLS[NAME]>;
output: InferToolOutput<TOOLS[NAME]>;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic?: false | undefined;
/** True if this is an intermediate (streamed) result rather than the final one. */
preliminary?: boolean;
title?: string;
};
}>;
/**
 * A tool result for a dynamic (not statically known) tool; `input` and
 * `output` are untyped.
 */
type DynamicToolResult = {
type: 'tool-result';
toolCallId: string;
toolName: string;
input: unknown;
output: unknown;
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic: true;
preliminary?: boolean;
title?: string;
};
/**
 * Union of static and dynamic tool results.
 */
type TypedToolResult<TOOLS extends ToolSet> = StaticToolResult<TOOLS> | DynamicToolResult;
/**
 * A single part of the generated content: text, reasoning, source, file,
 * tool call, tool result, tool error, or a tool approval request.
 * Discriminated by the `type` field.
 */
type ContentPart<TOOLS extends ToolSet> = {
type: 'text';
text: string;
providerMetadata?: ProviderMetadata;
} | ReasoningOutput | ({
type: 'source';
} & Source) | {
type: 'file';
file: GeneratedFile;
providerMetadata?: ProviderMetadata;
} | ({
type: 'tool-call';
} & TypedToolCall<TOOLS> & {
providerMetadata?: ProviderMetadata;
}) | ({
type: 'tool-result';
} & TypedToolResult<TOOLS> & {
providerMetadata?: ProviderMetadata;
}) | ({
type: 'tool-error';
} & TypedToolError<TOOLS> & {
providerMetadata?: ProviderMetadata;
}) | ToolApprovalRequestOutput<TOOLS>;
/**
 * Create a type from an object with all keys and nested keys set to optional.
 * The helper supports normal objects and schemas (which are resolved automatically).
 * It always recurses into arrays.
 *
 * Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
 */
type DeepPartial<T> = T extends FlexibleSchema ? DeepPartialInternal<InferSchema<T>> : DeepPartialInternal<T>;
// Recursive worker for DeepPartial: primitives/functions/Date/RegExp pass
// through unchanged; Map/Set (and readonly variants) recurse via the Partial*
// helpers below; arrays recurse element-wise (with `| undefined` added);
// plain objects go through PartialObject.
type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
// NOTE(review): the `{} &` intersections below appear to be a display/
// assignability workaround inherited from type-fest's PartialDeep — verify
// against the type-fest source before simplifying.
type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
// Makes every property of an object optional while recursing into its values.
type PartialObject<ObjectType extends object> = {
[KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
};
/**
 * Timeout configuration for API calls. Can be specified as:
 * - A number representing milliseconds
 * - An object with `totalMs` property for the total timeout in milliseconds
 * - An object with `stepMs` property for the timeout of each step in milliseconds
 * - An object with `chunkMs` property for the timeout between stream chunks (streaming only)
 */
type TimeoutConfiguration = number | {
totalMs?: number;
stepMs?: number;
chunkMs?: number;
};
/**
 * Settings that apply to every model call (sampling parameters, limits,
 * retries, abort/timeout handling, and request headers).
 */
type CallSettings = {
/**
 * Maximum number of tokens to generate.
 */
maxOutputTokens?: number;
/**
 * Temperature setting. The range depends on the provider and model.
 *
 * It is recommended to set either `temperature` or `topP`, but not both.
 */
temperature?: number;
/**
 * Nucleus sampling. This is a number between 0 and 1.
 *
 * E.g. 0.1 would mean that only tokens with the top 10% probability mass
 * are considered.
 *
 * It is recommended to set either `temperature` or `topP`, but not both.
 */
topP?: number;
/**
 * Only sample from the top K options for each subsequent token.
 *
 * Used to remove "long tail" low probability responses.
 * Recommended for advanced use cases only. You usually only need to use temperature.
 */
topK?: number;
/**
 * Presence penalty setting. It affects the likelihood of the model to
 * repeat information that is already in the prompt.
 *
 * The presence penalty is a number between -1 (increase repetition)
 * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
 */
presencePenalty?: number;
/**
 * Frequency penalty setting. It affects the likelihood of the model
 * to repeatedly use the same words or phrases.
 *
 * The frequency penalty is a number between -1 (increase repetition)
 * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
 */
frequencyPenalty?: number;
/**
 * Stop sequences.
 * If set, the model will stop generating text when one of the stop sequences is generated.
 * Providers may have limits on the number of stop sequences.
 */
stopSequences?: string[];
/**
 * The seed (integer) to use for random sampling. If set and supported
 * by the model, calls will generate deterministic results.
 */
seed?: number;
/**
 * Maximum number of retries. Set to 0 to disable retries.
 *
 * @default 2
 */
maxRetries?: number;
/**
 * Abort signal.
 */
abortSignal?: AbortSignal;
/**
 * Timeout in milliseconds. The call will be aborted if it takes longer
 * than the specified timeout. Can be used alongside abortSignal.
 *
 * Can be specified as a number (milliseconds) or as an object with `totalMs`.
 */
timeout?: TimeoutConfiguration;
/**
 * Additional HTTP headers to be sent with the request.
 * Only applicable for HTTP-based providers.
 */
headers?: Record<string, string | undefined>;
};
/**
 * Prompt part of the AI function options.
 * It contains a system message, a simple text prompt, or a list of messages.
 *
 * The union below makes `prompt` and `messages` mutually exclusive: each
 * branch types the other property as `never`.
 */
type Prompt = {
/**
 * System message to include in the prompt. Can be used with `prompt` or `messages`.
 */
system?: string | SystemModelMessage | Array<SystemModelMessage>;
} & ({
/**
 * A prompt. It can be either a text prompt or a list of messages.
 *
 * You can either use `prompt` or `messages` but not both.
 */
prompt: string | Array<ModelMessage>;
/**
 * A list of messages.
 *
 * You can either use `prompt` or `messages` but not both.
 */
messages?: never;
} | {
/**
 * A list of messages.
 *
 * You can either use `prompt` or `messages` but not both.
 */
messages: Array<ModelMessage>;
/**
 * A prompt. It can be either a text prompt or a list of messages.
 *
 * You can either use `prompt` or `messages` but not both.
 */
prompt?: never;
});
/**
 * Telemetry configuration.
 */
type TelemetrySettings = {
/**
 * Enable or disable telemetry. Disabled by default while experimental.
 */
isEnabled?: boolean;
/**
 * Enable or disable input recording. Enabled by default.
 *
 * You might want to disable input recording to avoid recording sensitive
 * information, to reduce data transfers, or to increase performance.
 */
recordInputs?: boolean;
/**
 * Enable or disable output recording. Enabled by default.
 *
 * You might want to disable output recording to avoid recording sensitive
 * information, to reduce data transfers, or to increase performance.
 */
recordOutputs?: boolean;
/**
 * Identifier for this function. Used to group telemetry data by function.
 */
functionId?: string;
/**
 * Additional information to include in the telemetry data.
 */
metadata?: Record<string, AttributeValue>;
/**
 * A custom tracer to use for the telemetry data.
 */
tracer?: Tracer;
};
/**
 * Experimental. Can change in patch versions without warning.
 *
 * Download function. Called with the array of URLs and a boolean indicating
 * whether the URL is supported by the model.
 *
 * The download function can decide for each URL:
 * - to return null (which means that the URL should be passed to the model)
 * - to download the asset and return the data (incl. retries, authentication, etc.)
 *
 * Should throw DownloadError if the download fails.
 *
 * Should return an array of objects sorted by the order of the requested downloads.
 * For each object, the data should be a Uint8Array if the URL was downloaded.
 * For each object, the mediaType should be the media type of the downloaded asset.
 * For each object, the data should be null if the URL should be passed through as is.
 */
type DownloadFunction = (options: Array<{
/** URL of the asset to (potentially) download. */
url: URL;
/** Whether the model can consume this URL directly without downloading. */
isUrlSupportedByModel: boolean;
}>) => PromiseLike<Array<{
/** Downloaded binary data. */
data: Uint8Array;
/** Media type of the downloaded asset, if known. */
mediaType: string | undefined;
} | null>>;
/**
 * A message that was generated during the generation process.
 * It can be either an assistant message or a tool message.
 */
type ResponseMessage = AssistantModelMessage | ToolModelMessage;
/**
 * The result of a single step in the generation process.
 */
type StepResult<TOOLS extends ToolSet> = {
/**
 * Zero-based index of this step.
 */
readonly stepNumber: number;
/**
 * Information about the model that produced this step.
 */
readonly model: {
/** The provider of the model. */
readonly provider: string;
/** The ID of the model. */
readonly modelId: string;
};
/**
 * Identifier from telemetry settings for grouping related operations.
 */
readonly functionId: string | undefined;
/**
 * Additional metadata from telemetry settings.
 */
readonly metadata: Record<string, unknown> | undefined;
/**
 * User-defined context object flowing through the generation.
 *
 * Experimental (can break in patch releases).
 */
readonly experimental_context: unknown;
/**
 * The content that was generated in the last step.
 */
readonly content: Array<ContentPart<TOOLS>>;
/**
 * The generated text.
 */
readonly text: string;
/**
 * The reasoning that was generated during the generation.
 */
readonly reasoning: Array<ReasoningPart>;
/**
 * The reasoning text that was generated during the generation.
 */
readonly reasoningText: string | undefined;
/**
 * The files that were generated during the generation.
 */
readonly files: Array<GeneratedFile>;
/**
 * The sources that were used to generate the text.
 */
readonly sources: Array<Source>;
/**
 * The tool calls that were made during the generation.
 */
readonly toolCalls: Array<TypedToolCall<TOOLS>>;
/**
 * The static tool calls that were made in the last step.
 */
readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
/**
 * The dynamic tool calls that were made in the last step.
 */
readonly dynamicToolCalls: Array<DynamicToolCall>;
/**
 * The results of the tool calls.
 */
readonly toolResults: Array<TypedToolResult<TOOLS>>;
/**
 * The static tool results that were made in the last step.
 */
readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
/**
 * The dynamic tool results that were made in the last step.
 */
readonly dynamicToolResults: Array<DynamicToolResult>;
/**
 * The unified reason why the generation finished.
 */
readonly finishReason: FinishReason;
/**
 * The raw reason why the generation finished (from the provider).
 */
readonly rawFinishReason: string | undefined;
/**
 * The token usage of the generated text.
 */
readonly usage: LanguageModelUsage;
/**
 * Warnings from the model provider (e.g. unsupported settings).
 */
readonly warnings: CallWarning[] | undefined;
/**
 * Additional request information.
 */
readonly request: LanguageModelRequestMetadata;
/**
 * Additional response information.
 */
readonly response: LanguageModelResponseMetadata & {
/**
 * The response messages that were generated during the call.
 * Response messages can be either assistant messages or tool messages.
 * They contain a generated id.
 */
readonly messages: Array<ResponseMessage>;
/**
 * Response body (available only for providers that use HTTP requests).
 */
body?: unknown;
};
/**
 * Additional provider-specific metadata. They are passed through
 * from the provider to the AI SDK and enable provider-specific
 * results that can be fully encapsulated in the provider.
 */
readonly providerMetadata: ProviderMetadata | undefined;
};
/**
 * Condition that decides whether a multi-step generation should stop.
 * Receives all step results produced so far and may be async.
 */
type StopCondition<TOOLS extends ToolSet> = (options: {
steps: Array<StepResult<TOOLS>>;
}) => PromiseLike<boolean> | boolean;
/**
 * Creates a stop condition that stops once the given number of steps
 * has been reached.
 */
declare function stepCountIs(stepCount: number): StopCondition<any>;
/**
 * Creates a stop condition that stops once a call to the tool with the
 * given name has been made.
 */
declare function hasToolCall(toolName: string): StopCondition<any>;
/**
 * Common model information used across callback events.
 */
interface CallbackModelInfo {
/** The provider identifier (e.g., 'openai', 'anthropic'). */
readonly provider: string;
/** The specific model identifier (e.g., 'gpt-4o'). */
readonly modelId: string;
}
/**
 * Event passed to the `onStart` callback.
 *
 * Called when the generation operation begins, before any LLM calls.
 *
 * @typeParam TOOLS - The tool set available for the generation.
 * @typeParam OUTPUT - The structured-output specification, if any.
 * @typeParam INCLUDE - Settings controlling what data is included in step results.
 */
interface OnStartEvent<TOOLS extends ToolSet = ToolSet, OUTPUT extends Output = Output, INCLUDE = {
requestBody?: boolean;
responseBody?: boolean;
}> {
/** The model being used for generation. */
readonly model: CallbackModelInfo;
/** The system message(s) provided to the model. */
readonly system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
/** The prompt string or array of messages if using the prompt option. */
readonly prompt: string | Array<ModelMessage> | undefined;
/** The messages array if using the messages option. */
readonly messages: Array<ModelMessage> | undefined;
/** The tools available for this generation. */
readonly tools: TOOLS | undefined;
/** The tool choice strategy for this generation. */
readonly toolChoice: ToolChoice<NoInfer<TOOLS>> | undefined;
/** Limits which tools are available for the model to call. */
readonly activeTools: Array<keyof TOOLS> | undefined;
/** Maximum number of tokens to generate. */
readonly maxOutputTokens: number | undefined;
/** Sampling temperature for generation. */
readonly temperature: number | undefined;
/** Top-p (nucleus) sampling parameter. */
readonly topP: number | undefined;
/** Top-k sampling parameter. */
readonly topK: number | undefined;
/** Presence penalty for generation. */
readonly presencePenalty: number | undefined;
/** Frequency penalty for generation. */
readonly frequencyPenalty: number | undefined;
/** Sequences that will stop generation. */
readonly stopSequences: string[] | undefined;
/** Random seed for reproducible generation. */
readonly seed: number | undefined;
/** Maximum number of retries for failed requests. */
readonly maxRetries: number;
/**
 * Timeout configuration for the generation.
 * Can be a number (milliseconds) or an object with totalMs, stepMs, chunkMs.
 */
readonly timeout: TimeoutConfiguration | undefined;
/** Additional HTTP headers sent with the request. */
readonly headers: Record<string, string | undefined> | undefined;
/** Additional provider-specific options. */
readonly providerOptions: ProviderOptions | undefined;
/**
 * Condition(s) for stopping the generation.
 * When the condition is an array, any of the conditions can be met to stop.
 */
readonly stopWhen: StopCondition<TOOLS> | Array<StopCondition<TOOLS>> | undefined;
/** The output specification for structured outputs, if configured. */
readonly output: OUTPUT | undefined;
/** Abort signal for cancelling the operation. */
readonly abortSignal: AbortSignal | undefined;
/**
 * Settings for controlling what data is included in step results.
 */
readonly include: INCLUDE | undefined;
/** Identifier from telemetry settings for grouping related operations. */
readonly functionId: string | undefined;
/** Additional metadata passed to the generation. */
readonly metadata: Record<string, unknown> | undefined;
/**
 * User-defined context object that flows through the entire generation lifecycle.
 * Can be accessed and modified in `prepareStep` and tool `execute` functions.
 */
readonly experimental_context: unknown;
}
/**
 * Event passed to the `onStepStart` callback.
 *
 * Called when a step (LLM call) begins, before the provider is called.
 * Each step represents a single LLM invocation.
 *
 * @typeParam TOOLS - The tool set available for the generation.
 * @typeParam OUTPUT - The structured-output specification, if any.
 * @typeParam INCLUDE - Settings controlling what data is included in step results.
 */
interface OnStepStartEvent<TOOLS extends ToolSet = ToolSet, OUTPUT extends Output = Output, INCLUDE = {
requestBody?: boolean;
responseBody?: boolean;
}> {
/** Zero-based index of the current step. */
readonly stepNumber: number;
/** The model being used for this step. */
readonly model: CallbackModelInfo;
/**
 * The system message for this step.
 */
readonly system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
/**
 * The messages that will be sent to the model for this step.
 * Uses the user-facing `ModelMessage` format.
 * May be overridden by prepareStep.
 */
readonly messages: Array<ModelMessage>;
/** The tools available for this generation. */
readonly tools: TOOLS | undefined;
/** The tool choice configuration for this step. */
readonly toolChoice: LanguageModelV3ToolChoice | undefined;
/** Limits which tools are available for this step. */
readonly activeTools: Array<keyof TOOLS> | undefined;
/** Array of results from previous steps (empty for first step). */
readonly steps: ReadonlyArray<StepResult<TOOLS>>;
/** Additional provider-specific options for this step. */
readonly providerOptions: ProviderOptions | undefined;
/**
 * Timeout configuration for the generation.
 * Can be a number (milliseconds) or an object with totalMs, stepMs, chunkMs.
 */
readonly timeout: TimeoutConfiguration | undefined;
/** Additional HTTP headers sent with the request. */
readonly headers: Record<string, string | undefined> | undefined;
/**
 * Condition(s) for stopping the generation.
 * When the condition is an array, any of the conditions can be met to stop.
 */
readonly stopWhen: StopCondition<TOOLS> | Array<StopCondition<TOOLS>> | undefined;
/** The output specification for structured outputs, if configured. */
readonly output: OUTPUT | undefined;
/** Abort signal for cancelling the operation. */
readonly abortSignal: AbortSignal | undefined;
/**
 * Settings for controlling what data is included in step results.
 */
readonly include: INCLUDE | undefined;
/** Identifier from telemetry settings for grouping related operations. */
readonly functionId: string | undefined;
/** Additional metadata from telemetry settings. */
readonly metadata: Record<string, unknown> | undefined;
/**
 * User-defined context object. May be updated from `prepareStep` between steps.
 */
readonly experimental_context: unknown;
}
/**
 * Event passed to the `onToolCallStart` callback.
 *
 * Called when a tool execution begins, before the tool's `execute` function is invoked.
 *
 * @typeParam TOOLS - The tool set available for the generation.
 */
interface OnToolCallStartEvent<TOOLS extends ToolSet = ToolSet> {
/** Zero-based index of the current step where this tool call occurs. */
readonly stepNumber: number | undefined;
/** The model being used for this step. */
readonly model: CallbackModelInfo | undefined;
/** The full tool call object. */
readonly toolCall: TypedToolCall<TOOLS>;
/** The conversation messages available at tool execution time. */
readonly messages: Array<ModelMessage>;
/** Signal for cancelling the operation. */
readonly abortSignal: AbortSignal | undefined;
/** Identifier from telemetry settings for grouping related operations. */
readonly functionId: string | undefined;
/** Additional metadata from telemetry settings. */
readonly metadata: Record<string, unknown> | undefined;
/** User-defined context object flowing through the generation. */
readonly experimental_context: unknown;
}
/**
 * Event passed to the `onToolCallFinish` callback.
 *
 * Called when a tool execution completes, either successfully or with an error.
 * Discriminated union on the `success` field:
 * - `success: true` — `output` holds the tool's return value; `error` is never present.
 * - `success: false` — `error` holds the failure; `output` is never present.
 */
type OnToolCallFinishEvent<TOOLS extends ToolSet = ToolSet> = {
/** Zero-based index of the current step where this tool call occurred. */
readonly stepNumber: number | undefined;
/** Information about the model being used for this step. */
readonly model: CallbackModelInfo | undefined;
/** The full tool call object, typed against the `TOOLS` tool set. */
readonly toolCall: TypedToolCall<TOOLS>;
/** The conversation messages available at tool execution time. */
readonly messages: Array<ModelMessage>;
/** Signal for cancelling the operation. */
readonly abortSignal: AbortSignal | undefined;
/** Execution time of the tool call in milliseconds. */
readonly durationMs: number;
/** Identifier from telemetry settings for grouping related operations. */
readonly functionId: string | undefined;
/** Additional metadata from telemetry settings. */
readonly metadata: Record<string, unknown> | undefined;
/** User-defined context object flowing through the generation. */
readonly experimental_context: unknown;
} & ({
/** Discriminant: the tool call succeeded. */
readonly success: true;
/** The tool's return value. */
readonly output: unknown;
/** Excluded on the success branch (enables narrowing via `success`). */
readonly error?: never;
} | {
/** Discriminant: the tool call failed. */
readonly success: false;
/** Excluded on the failure branch (enables narrowing via `success`). */
readonly output?: never;
/** The error that occurred during tool execution. */
readonly error: unknown;
});
/**
 * Event passed to the `onStepFinish` callback.
 *
 * Called when a step (single LLM call) completes.
 * This is an alias for the {@link StepResult} of that step — no extra
 * fields are added.
 */
type OnStepFinishEvent<TOOLS extends ToolSet = ToolSet> = StepResult<TOOLS>;
/**
 * Event passed to the `onFinish` callback.
 *
 * Called when the entire generation completes (all steps finished).
 * Extends the final step's {@link StepResult} with data aggregated
 * across all steps.
 */
type OnFinishEvent<TOOLS extends ToolSet = ToolSet> = StepResult<TOOLS> & {
/** Array containing results from all steps in the generation. */
readonly steps: StepResult<TOOLS>[];
/** Aggregated token usage across all steps. */
readonly totalUsage: LanguageModelUsage;
/**
 * The final state of the user-defined context object.
 *
 * Experimental (can break in patch releases).
 *
 * NOTE(review): not marked `readonly`, unlike every sibling field —
 * confirm whether post-finish mutation is intended.
 *
 * @default undefined
 */
experimental_context: unknown;
/** Identifier from telemetry settings for grouping related operations. */
readonly functionId: string | undefined;
/** Additional metadata from telemetry settings. */
readonly metadata: Record<string, unknown> | undefined;
};
/**
 * Function that you can use to provide different settings for a step.
 *
 * May be synchronous or return a promise.
 *
 * @param options - The options for the step.
 * @param options.steps - The steps that have been executed so far.
 * @param options.stepNumber - The number of the step that is being executed.
 * @param options.model - The model that is being used.
 * @param options.messages - The messages that will be sent to the model for the current step.
 * @param options.experimental_context - The context passed via the experimental_context setting (experimental).
 *
 * @returns A {@link PrepareStepResult} with the settings for the step.
 * If you return undefined (or leave individual settings undefined), the
 * settings from the outer level will be used.
 */
type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
/**
 * The steps that have been executed so far.
 */
steps: Array<StepResult<NoInfer<TOOLS>>>;
/**
 * The zero-based number of the step that is being executed.
 */
stepNumber: number;
/**
 * The model instance that is being used for this step.
 */
model: LanguageModel;
/**
 * The messages that will be sent to the model for the current step.
 */
messages: Array<ModelMessage>;
/**
 * The context passed via the experimental_context setting (experimental).
 */
experimental_context: unknown;
}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
/**
 * The result type returned by a {@link PrepareStepFunction},
 * allowing per-step overrides of model, tools, or messages.
 *
 * Returning `undefined` keeps all settings from the outer level;
 * omitted individual properties likewise fall back to the outer level.
 */
type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
/**
 * Optionally override which LanguageModel instance is used for this step.
 */
model?: LanguageModel;
/**
 * Optionally set which tool the model must call, or provide tool call configuration
 * for this step.
 */
toolChoice?: ToolChoice<NoInfer<TOOLS>>;
/**
 * If provided, only these tools are enabled/available for this step.
 */
activeTools?: Array<keyof NoInfer<TOOLS>>;
/**
 * Optionally override the system message(s) sent to the model for this step.
 */
system?: string | SystemModelMessage | Array<SystemModelMessage>;
/**
 * Optionally override the full set of messages sent to the model
 * for this step.
 */
messages?: Array<ModelMessage>;
/**
 * Context that is passed into tool execution. Experimental.
 *
 * Changing the context will affect the context in this step
 * and all subsequent steps.
 */
experimental_context?: unknown;
/**
 * Additional provider-specific options for this step.
 *
 * Can be used to pass provider-specific configuration such as
 * container IDs for Anthropic's code execution.
 */
providerOptions?: ProviderOptions;
} | undefined;
declare const symbol$j: unique symbol;
/**
 * Error for a tool call whose input could not be parsed or validated
 * (see `ToolCallRepairFunction`, which receives this error).
 *
 * Use the static `isInstance` method to detect this error type.
 */
declare class InvalidToolInputError extends AISDKError {
/** Private brand symbol — presumably backs `isInstance`; not part of the public API. */
private readonly [symbol$j];
/** Name of the tool whose input was invalid. */
readonly toolName: string;
/** The raw input string that failed to parse/validate. */
readonly toolInput: string;
constructor({ toolInput, toolName, cause, message, }: {
message?: string;
toolInput: string;
toolName: string;
cause: unknown;
});
/** Type guard: returns true if `error` is an `InvalidToolInputError`. */
static isInstance(error: unknown): error is InvalidToolInputError;
}
declare const symbol$i: unique symbol;
/**
 * Error for a tool call that references a tool name that is not present
 * in the available tool set (see `ToolCallRepairFunction`, which receives
 * this error).
 *
 * Use the static `isInstance` method to detect this error type.
 */
declare class NoSuchToolError extends AISDKError {
/** Private brand symbol — presumably backs `isInstance`; not part of the public API. */
private readonly [symbol$i];
/** The tool name that was requested but not found. */
readonly toolName: string;
/** Names of the tools that were available, if known. */
readonly availableTools: string[] | undefined;
constructor({ toolName, availableTools, message, }: {
toolName: string;
availableTools?: string[] | undefined;
message?: string;
});
/** Type guard: returns true if `error` is a `NoSuchToolError`. */
static isInstance(error: unknown): error is NoSuchToolError;
}
/*
 * Zod validation schemas for the model message types.
 * NOTE(review): assumes `z` (zod) is imported earlier in this file — not
 * visible in this chunk; verify.
 */
/** Zod schema validating `SystemModelMessage` values. */
declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
/** Zod schema validating `UserModelMessage` values. */
declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
/** Zod schema validating `AssistantModelMessage` values. */
declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
/** Zod schema validating `ToolModelMessage` values. */
declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
/** Zod schema validating any `ModelMessage` (union of the above). */
declare const modelMessageSchema: z.ZodType<ModelMessage>;
/**
 * A function that attempts to repair a tool call that failed to parse.
 *
 * It receives the error and the context as arguments and returns the
 * repaired tool call, or `null` if the call cannot be repaired.
 *
 * @param options.system - The system prompt.
 * @param options.messages - The messages in the current generation step.
 * @param options.toolCall - The tool call that failed to parse.
 * @param options.tools - The tools that are available.
 * @param options.inputSchema - A function that returns the JSON Schema for a tool.
 * @param options.error - The error that occurred while parsing the tool call.
 *
 * @returns The repaired `LanguageModelV3ToolCall`, or `null` when the tool
 * call could not be repaired.
 */
type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
messages: ModelMessage[];
toolCall: LanguageModelV3ToolCall;
tools: TOOLS;
inputSchema: (options: {
toolName: string;
}) => PromiseLike<JSONSchema7>;
error: NoSuchToolError | InvalidToolInputError;
}) => Promise<LanguageModelV3ToolCall | null>;
/**
 * Include settings for generateText, controlling what data is included
 * in step results (requestBody and responseBody).
 */
type GenerateTextIncludeSettings = {
/** Whether to include the raw request body. */
requestBody?: boolean;
/** Whether to include the raw response body. */
responseBody?: boolean;
};
/**
 * Callback that is set using the `experimental_onStart` option.
 *
 * Called once when the generateText operation begins, before any LLM calls.
 * Use this callback for logging, analytics, or initializing state at the
 * start of a generation. May return a `PromiseLike<void>`.
 *
 * @param event - The event object containing generation configuration.
 */
type GenerateTextOnStartCallback<TOOLS extends ToolSet = ToolSet, OUTPUT extends Output = Output> = (event: OnStartEvent<TOOLS, OUTPUT, GenerateTextIncludeSettings>) => PromiseLike<void> | void;
/**
 * Callback that is set using the `experimental_onStepStart` option.
 *
 * Called when a step (LLM call) begins, before the provider is called.
 * Each step represents a single LLM invocation. Multiple steps occur when
 * using tool calls (the model may be called multiple times in a loop).
 * May return a `PromiseLike<void>`.
 *
 * @param event - The event object containing step configuration.
 */
type GenerateTextOnStepStartCallback<TOOLS extends ToolSet = ToolSet, OUTPUT extends Output = Output> = (event: OnStepStartEvent<TOOLS, OUTPUT, GenerateTextIncludeSettings>) => PromiseLike<void> | void;
/**
 * Callback that is set using the `experimental_onToolCallStart` option.
 *
 * Called when a tool execution begins, before the tool's `execute` function is invoked.
 * Use this for logging tool invocations, tracking tool usage, or pre-execution validation.
 * May return a `PromiseLike<void>`.
 *
 * @param event - The `OnToolCallStartEvent` containing tool call information.
 */
type GenerateTextOnToolCallStartCallback<TOOLS extends ToolSet = ToolSet> = (event: OnToolCallStartEvent<TOOLS>) => PromiseLike<void> | void;
/**
 * Callback that is set using the `experimental_onToolCallFinish` option.
 *
 * Called when a tool execution completes, either successfully or with an error.
 * Use this for logging tool results, tracking execution time, or error handling.
 *
 * The event uses a discriminated union on the `success` field:
 * - When `success: true`: `output` contains the tool result, `error` is never present.
 * - When `success: false`: `error` contains the error, `output` is never present.
 *
 * May return a `PromiseLike<void>`.
 *
 * @param event - The `OnToolCallFinishEvent` containing tool call result information.
 */
type GenerateTextOnToolCallFinishCallback<TOOLS extends ToolSet = ToolSet> = (event: OnToolCallFinishEvent<TOOLS>) => PromiseLike<void> | void;
/**
 * Callback that is set using the `onStepFinish` option.
 *
 * Called when a step (LLM call) completes. The event includes all step result
 * properties (text, tool calls, usage, etc.) along with additional metadata.
 *
 * May return a `PromiseLike<void>` — widened from `Promise<void>` for
 * consistency with the other generateText callbacks
 * (`GenerateTextOnStartCallback`, `GenerateTextOnFinishCallback`, etc.);
 * backward compatible, since every `Promise` is a `PromiseLike`.
 *
 * @param event - The result of the step.
 */
type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (event: OnStepFinishEvent<TOOLS>) => PromiseLike<void> | void;
/**
 * Callback that is set using the `onFinish` option.
 *
 * Called when the entire generation completes (all steps finished).
 * The event includes the final step's result properties along with
 * aggregated data from all steps (see `OnFinishEvent`).
 * May return a `PromiseLike<void>`.
 *
 * @param event - The final result along with aggregated step data.
 */
type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: OnFinishEvent<TOOLS>) => PromiseLike<void> | void;
/**
* Generate a text and call tools for a given prompt using a language model.
*
* This function does not stream the output. If you want to stream the output, use `streamText` instead.
*
* @param model - The language model to use.
*
* @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
* @param toolChoice - The tool choice strategy. Default: 'auto'.
*
* @param system - A system message that will be part of the prompt.
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
*
* @param maxOutputTokens - Maximum number of tokens to generate.
* @param temperature - Temperature setting.
* The value is passed through to the provider. The range depends on the provider and model.
* It is recommended to set either `temperature` or `topP`, but not both.
* @param topP - Nucleus sampling.
* The value is passed through to the provider. The range depends on the provider and model.
* It is recommended to set either `temperature` or `topP`, but not both.
* @param topK - Only sample from the top K options for each subsequent token.
* Used to remove "long tail" low probability responses.
* Recommended for advanced use