// ai - AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript
import { IdGenerator } from '@ai-sdk/provider-utils';
import { ServerResponse } from 'node:http';
import {
CallWarning,
FinishReason,
LanguageModelRequestMetadata,
ProviderMetadata,
} from '../types';
import { Source } from '../types/language-model';
import { LanguageModelResponseMetadata } from '../types/language-model-response-metadata';
import { LanguageModelUsage } from '../types/usage';
import { InferUIMessageChunk } from '../ui-message-stream/ui-message-chunks';
import { UIMessageStreamOnFinishCallback } from '../ui-message-stream/ui-message-stream-on-finish-callback';
import { UIMessageStreamResponseInit } from '../ui-message-stream/ui-message-stream-response-init';
import { InferUIMessageMetadata, UIMessage } from '../ui/ui-messages';
import { AsyncIterableStream } from '../util/async-iterable-stream';
import { ErrorHandler } from '../util/error-handler';
import { ContentPart } from './content-part';
import { GeneratedFile } from './generated-file';
import { Output } from './output';
import {
InferCompleteOutput,
InferElementOutput,
InferPartialOutput,
} from './output-utils';
import { ReasoningOutput } from './reasoning-output';
import { ResponseMessage } from './response-message';
import { StepResult } from './step-result';
import { ToolApprovalRequestOutput } from './tool-approval-request-output';
import { DynamicToolCall, StaticToolCall, TypedToolCall } from './tool-call';
import { TypedToolError } from './tool-error';
import { StaticToolOutputDenied } from './tool-output-denied';
import {
DynamicToolResult,
StaticToolResult,
TypedToolResult,
} from './tool-result';
import { ToolSet } from './tool-set';
export type UIMessageStreamOptions<UI_MESSAGE extends UIMessage> = {
/**
* The original messages. If they are provided, persistence mode is assumed,
* and a message ID is provided for the response message.
*/
originalMessages?: UI_MESSAGE[];
/**
* Generate a message ID for the response message.
*
* If not provided, no message ID will be set for the response message (unless
* the original messages are provided and the last message is an assistant message).
*/
generateMessageId?: IdGenerator;
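/**
* Callback that is called when the UI message stream has finished streaming.
*/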
onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
/**
* Extracts message metadata that will be sent to the client.
*
* Called on `start` and `finish` events.
*/
messageMetadata?: (options: {
part: TextStreamPart<ToolSet>;
}) => InferUIMessageMetadata<UI_MESSAGE> | undefined;
/**
* Send reasoning parts to the client.
* Defaults to true.
*/
sendReasoning?: boolean;
/**
* Send source parts to the client.
* Defaults to false.
*/
sendSources?: boolean;
/**
* Send the finish event to the client.
* Set to false if you are using additional streamText calls
* that send additional data.
* Defaults to true.
*/
sendFinish?: boolean;
/**
* Send the message start event to the client.
* Set to false if you are using additional streamText calls
* and the message start event has already been sent.
* Defaults to true.
*/
sendStart?: boolean;
/**
* Process an error, e.g. to log it. Defaults to `() => 'An error occurred.'`.
*
* @returns error message to include in the data stream.
*/
onError?: (error: unknown) => string;
};
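// Example (a minimal sketch of these options in use): assumes `streamText` and
// `convertToModelMessages` from this package plus a provider model; the model
// id and route shape below are illustrative.
//
//   import { convertToModelMessages, streamText } from 'ai';
//   import { openai } from '@ai-sdk/openai';
//
//   export async function POST(req: Request) {
//     const { messages } = await req.json();
//     const result = streamText({
//       model: openai('gpt-4o'),
//       messages: convertToModelMessages(messages),
//     });
//     return result.toUIMessageStreamResponse({
//       originalMessages: messages, // enables persistence mode
//       sendReasoning: true,
//       onError: () => 'An error occurred.', // avoid leaking error details
//     });
//   }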
export type ConsumeStreamOptions = {
onError?: ErrorHandler;
};
/**
* A result object for accessing different stream types and additional information.
*/
export interface StreamTextResult<
TOOLS extends ToolSet,
OUTPUT extends Output,
> {
/**
* The content that was generated in the last step.
*
* Automatically consumes the stream.
*/
readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
/**
* The full text that has been generated by the last step.
*
* Automatically consumes the stream.
*/
readonly text: PromiseLike<string>;
/**
* The full reasoning that the model has generated.
*
* Automatically consumes the stream.
*/
readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
/**
* The reasoning that has been generated by the last step.
*
* Automatically consumes the stream.
*/
readonly reasoningText: PromiseLike<string | undefined>;
/**
* Files that have been generated by the model in the last step.
*
* Automatically consumes the stream.
*/
readonly files: PromiseLike<GeneratedFile[]>;
/**
* Sources that have been used as references in the last step.
*
* Automatically consumes the stream.
*/
readonly sources: PromiseLike<Source[]>;
/**
* The tool calls that have been executed in the last step.
*
* Automatically consumes the stream.
*/
readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
/**
* The static tool calls that have been executed in the last step.
*
* Automatically consumes the stream.
*/
readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
/**
* The dynamic tool calls that have been executed in the last step.
*
* Automatically consumes the stream.
*/
readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
/**
* The static tool results that have been generated in the last step.
*
* Automatically consumes the stream.
*/
readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
/**
* The dynamic tool results that have been generated in the last step.
*
* Automatically consumes the stream.
*/
readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
/**
* The tool results that have been generated in the last step.
*
* Automatically consumes the stream.
*/
readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
/**
* The unified finish reason why the generation finished. Taken from the last step.
*
* Automatically consumes the stream.
*/
readonly finishReason: PromiseLike<FinishReason>;
/**
* The raw reason why the generation finished (from the provider). Taken from the last step.
*
* Automatically consumes the stream.
*/
readonly rawFinishReason: PromiseLike<string | undefined>;
/**
* The token usage of the last step.
*
* Automatically consumes the stream.
*/
readonly usage: PromiseLike<LanguageModelUsage>;
/**
* The total token usage of the generated response.
* When there are multiple steps, the usage is the sum of all step usages.
*
* Automatically consumes the stream.
*/
readonly totalUsage: PromiseLike<LanguageModelUsage>;
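// Example (sketch): step-level vs. aggregate usage.
//
//   const lastStepUsage = await result.usage; // usage of the last step only
//   const total = await result.totalUsage;    // summed across all steps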
/**
* Warnings from the model provider (e.g. unsupported settings) for the first step.
*
* Automatically consumes the stream.
*/
readonly warnings: PromiseLike<CallWarning[] | undefined>;
/**
* Details for all steps.
* You can use this to get information about intermediate steps,
* such as the tool calls or the response headers.
*
* Automatically consumes the stream.
*/
readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
/**
* Additional request information from the last step.
*
* Automatically consumes the stream.
*/
readonly request: PromiseLike<LanguageModelRequestMetadata>;
/**
* Additional response information from the last step.
*
* Automatically consumes the stream.
*/
readonly response: PromiseLike<
LanguageModelResponseMetadata & {
/**
* The response messages that were generated during the call. They consist of an assistant message,
* potentially containing tool calls.
*
* When there are tool results, an additional tool message containing the available tool results is included.
* Results for tools without execute functions are not included in the tool results and
* need to be added separately.
*/
messages: Array<ResponseMessage>;
}
>;
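// Example (sketch, assuming a `history` array of model messages that the
// caller maintains): append the generated messages so that the next call
// sees the full conversation.
//
//   const { messages } = await result.response;
//   history.push(...messages);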
/**
* Additional provider-specific metadata from the last step.
* Metadata is passed through from the provider to the AI SDK and
* enables provider-specific results that can be fully encapsulated in the provider.
*/
readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
/**
* A text stream that returns only the generated text deltas. You can use it
* as either an AsyncIterable or a ReadableStream. When an error occurs, the
* stream will throw the error.
*/
readonly textStream: AsyncIterableStream<string>;
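// Example (sketch): consume the text stream as an AsyncIterable.
//
//   for await (const textDelta of result.textStream) {
//     process.stdout.write(textDelta);
//   }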
/**
* A stream with all events, including text deltas, tool calls, tool results, and
* errors.
* You can use it as either an AsyncIterable or a ReadableStream.
* Only errors that stop the stream, such as network errors, are thrown.
*/
readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
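// Example (sketch): narrow on `part.type` to handle individual events.
//
//   for await (const part of result.fullStream) {
//     switch (part.type) {
//       case 'text-delta':
//         process.stdout.write(part.text);
//         break;
//       case 'tool-call':
//         console.log('tool call:', part.toolName);
//         break;
//       case 'error':
//         console.error(part.error);
//         break;
//     }
//   }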
/**
* A stream of partial outputs. It uses the `output` specification.
*
* @deprecated Use `partialOutputStream` instead.
*/
readonly experimental_partialOutputStream: AsyncIterableStream<
InferPartialOutput<OUTPUT>
>;
/**
* A stream of partial parsed outputs. It uses the `output` specification.
*/
readonly partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
/**
* A stream of individual array elements as they complete.
* Only available when using `output: Output.array()`.
*/
readonly elementStream: AsyncIterableStream<InferElementOutput<OUTPUT>>;
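// Example (a sketch, assuming `Output` from this package; the `schema` option
// and the zod schema are illustrative assumptions):
//
//   const result = streamText({
//     model,
//     output: Output.array({ schema: z.object({ name: z.string() }) }),
//     prompt: 'Generate three fantasy character names.',
//   });
//   for await (const element of result.elementStream) {
//     console.log(element); // one fully parsed array element at a time
//   }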
/**
* The complete parsed output. It uses the `output` specification.
*/
readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
/**
* Consumes the stream without processing the parts.
* This is useful to force the stream to finish:
* it removes the backpressure, allowing the stream to run to completion and
* triggering the `onFinish` callback and the promise resolution.
*
* If an error occurs, it is passed to the optional `onError` callback.
*/
consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
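// Example (sketch): drain the stream so that `onFinish` fires and the result
// promises resolve, without rendering any parts.
//
//   await result.consumeStream({
//     onError: error => console.error('stream error:', error),
//   });
//   const finalText = await result.text;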
/**
* Converts the result to a UI message stream.
*
* @returns A UI message stream.
*/
toUIMessageStream<UI_MESSAGE extends UIMessage>(
options?: UIMessageStreamOptions<UI_MESSAGE>,
): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>;
/**
* Writes UI message stream output to a Node.js response-like object.
*/
pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(
response: ServerResponse,
options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>,
): void;
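// Example (sketch, using node:http; the `status` option is assumed to be part
// of UIMessageStreamResponseInit):
//
//   import { createServer } from 'node:http';
//
//   createServer(async (req, res) => {
//     const result = streamText({ model, prompt: 'Hello' });
//     result.pipeUIMessageStreamToResponse(res, { status: 200 });
//   }).listen(3000);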
/**
* Writes text delta output to a Node.js response-like object.
* It sets a `Content-Type` header to `text/plain; charset=utf-8` and
* writes each text delta as a separate chunk.
*
* @param response A Node.js response-like object (ServerResponse).
* @param init Optional headers, status code, and status text.
*/
pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
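// Example (sketch): the same pattern for plain text responses.
//
//   result.pipeTextStreamToResponse(res, {
//     headers: { 'Cache-Control': 'no-store' },
//   });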
/**
* Converts the result to a streamed `Response` object that emits UI message chunks.
*
* @returns A response object.
*/
toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(
options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>,
): Response;
/**
* Creates a simple text stream response.
* Each text delta is encoded as UTF-8 and sent as a separate chunk.
* Non-text-delta events are ignored.
* @param init Optional headers, status code, and status text.
*/
toTextStreamResponse(init?: ResponseInit): Response;
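// Example (sketch): return plain streaming text from a fetch-style handler.
//
//   export async function POST(req: Request) {
//     const { prompt } = await req.json();
//     const result = streamText({ model: openai('gpt-4o'), prompt });
//     return result.toTextStreamResponse();
//   }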
}
export type TextStreamPart<TOOLS extends ToolSet> =
| {
type: 'text-start';
id: string;
providerMetadata?: ProviderMetadata;
}
| {
type: 'text-end';
id: string;
providerMetadata?: ProviderMetadata;
}
| {
type: 'text-delta';
id: string;
providerMetadata?: ProviderMetadata;
text: string;
}
| {
type: 'reasoning-start';
id: string;
providerMetadata?: ProviderMetadata;
}
| {
type: 'reasoning-end';
id: string;
providerMetadata?: ProviderMetadata;
}
| {
type: 'reasoning-delta';
providerMetadata?: ProviderMetadata;
id: string;
text: string;
}
| {
type: 'tool-input-start';
id: string;
toolName: string;
providerMetadata?: ProviderMetadata;
providerExecuted?: boolean;
dynamic?: boolean;
title?: string;
}
| {
type: 'tool-input-end';
id: string;
providerMetadata?: ProviderMetadata;
}
| {
type: 'tool-input-delta';
id: string;
delta: string;
providerMetadata?: ProviderMetadata;
}
| ({ type: 'source' } & Source)
| { type: 'file'; file: GeneratedFile } // different because of GeneratedFile object
| ({ type: 'tool-call' } & TypedToolCall<TOOLS>)
| ({ type: 'tool-result' } & TypedToolResult<TOOLS>)
| ({ type: 'tool-error' } & TypedToolError<TOOLS>)
| ({ type: 'tool-output-denied' } & StaticToolOutputDenied<TOOLS>)
| ToolApprovalRequestOutput<TOOLS>
| {
type: 'start-step';
request: LanguageModelRequestMetadata;
warnings: CallWarning[];
}
| {
type: 'finish-step';
response: LanguageModelResponseMetadata;
usage: LanguageModelUsage;
finishReason: FinishReason;
rawFinishReason: string | undefined;
providerMetadata: ProviderMetadata | undefined;
}
| {
type: 'start';
}
| {
type: 'finish';
finishReason: FinishReason;
rawFinishReason: string | undefined;
totalUsage: LanguageModelUsage;
}
| {
type: 'abort';
reason?: string;
}
| {
type: 'error';
error: unknown;
}
| {
type: 'raw';
rawValue: unknown;
};
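// Example (sketch): observe step boundaries and per-step usage via the
// full stream.
//
//   for await (const part of result.fullStream) {
//     if (part.type === 'finish-step') {
//       console.log('step finished:', part.finishReason, part.usage);
//     }
//   }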