// Package: @tanstack/ai
// Version: (unspecified)
// Core TanStack AI library - Open source AI SDK
// 187 lines (186 loc) • 7.73 kB
// TypeScript
import { AnyTextAdapter } from './adapter.js';
import { ConstrainedModelMessage, InferSchemaType, SchemaInput, StreamChunk, TextOptions } from '../../types.js';
import { ChatMiddleware } from './middleware/types.js';
/**
 * The adapter kind this activity handles.
 * Declared as the literal type "text" so it can act as a discriminant
 * when routing between activity kinds.
 */
export declare const kind: "text";
/**
 * Options for the text activity.
 * Types are extracted directly from the adapter (which has pre-resolved generics).
 *
 * Note: `adapter` is the only required property; everything else is optional.
 *
 * @template TAdapter - The text adapter type (created by a provider function)
 * @template TSchema - Optional Standard Schema for structured output
 * @template TStream - Whether to stream the output (default: true)
 */
export interface TextActivityOptions<TAdapter extends AnyTextAdapter, TSchema extends SchemaInput | undefined, TStream extends boolean> {
    /** The text adapter to use (created by a provider function like openaiText('gpt-4o')) */
    adapter: TAdapter;
    /**
     * Conversation messages - content types are constrained by the adapter's
     * input modalities and metadata (both read from the adapter's `~types` phantom field).
     */
    messages?: Array<ConstrainedModelMessage<{
        inputModalities: TAdapter['~types']['inputModalities'];
        messageMetadataByModality: TAdapter['~types']['messageMetadataByModality'];
    }>>;
    /** System prompts to prepend to the conversation */
    systemPrompts?: TextOptions['systemPrompts'];
    /** Tools for function calling (auto-executed when called) */
    tools?: TextOptions['tools'];
    /** Controls the randomness of the output. Higher values make output more random. Range: [0.0, 2.0] */
    temperature?: TextOptions['temperature'];
    /** Nucleus sampling parameter. The model considers tokens with topP probability mass. */
    topP?: TextOptions['topP'];
    /** The maximum number of tokens to generate in the response. */
    maxTokens?: TextOptions['maxTokens'];
    /** Additional metadata to attach to the request. */
    metadata?: TextOptions['metadata'];
    /** Model-specific provider options (type comes from adapter) */
    modelOptions?: TAdapter['~types']['providerOptions'];
    /** AbortController for cancellation */
    abortController?: TextOptions['abortController'];
    /** Strategy for controlling the agent loop */
    agentLoopStrategy?: TextOptions['agentLoopStrategy'];
    /** Unique conversation identifier for tracking */
    conversationId?: TextOptions['conversationId'];
    /**
     * Optional Standard Schema for structured output.
     * When provided, the activity will:
     * 1. Run the full agentic loop (executing tools as needed)
     * 2. Once complete, return a Promise with the parsed output matching the schema
     *
     * Supports any Standard Schema compliant library (Zod v4+, ArkType, Valibot, etc.)
     *
     * @example
     * ```ts
     * const result = await chat({
     *   adapter: openaiText('gpt-4o'),
     *   messages: [{ role: 'user', content: 'Generate a person' }],
     *   outputSchema: z.object({ name: z.string(), age: z.number() })
     * })
     * // result is { name: string, age: number }
     * ```
     */
    outputSchema?: TSchema;
    /**
     * Whether to stream the text result.
     * When true (default), returns an AsyncIterable<StreamChunk> for streaming output.
     * When false, returns a Promise<string> with the collected text content.
     *
     * Note: If outputSchema is provided, this option is ignored and the result
     * is always a Promise<InferSchemaType<TSchema>>.
     *
     * @default true
     *
     * @example Non-streaming text
     * ```ts
     * const text = await chat({
     *   adapter: openaiText('gpt-4o'),
     *   messages: [{ role: 'user', content: 'Hello!' }],
     *   stream: false
     * })
     * // text is a string with the full response
     * ```
     */
    stream?: TStream;
    /**
     * Optional middleware array for observing/transforming chat behavior.
     * Middleware hooks are called in array order. See {@link ChatMiddleware} for available hooks.
     *
     * @example
     * ```ts
     * const stream = chat({
     *   adapter: openaiText('gpt-4o'),
     *   messages: [...],
     *   middleware: [loggingMiddleware, redactionMiddleware],
     * })
     * ```
     */
    middleware?: Array<ChatMiddleware>;
    /**
     * Opaque user-provided context value passed to middleware hooks.
     * Can be used to pass request-scoped data (e.g., user ID, request context).
     */
    context?: unknown;
}
/**
 * Create typed options for the chat() function without executing.
 * This is useful for pre-defining configurations with full type inference.
 *
 * @param options - The fully typed chat options to capture
 * @returns The same options, typed as TextActivityOptions so generics are
 *          preserved when spread into a later {@link chat} call
 *          (identity passthrough — declaration only; verify against implementation)
 *
 * @example
 * ```ts
 * const chatOptions = createChatOptions({
 *   adapter: anthropicText('claude-sonnet-4-5'),
 * })
 *
 * const stream = chat({ ...chatOptions, messages })
 * ```
 */
export declare function createChatOptions<TAdapter extends AnyTextAdapter, TSchema extends SchemaInput | undefined = undefined, TStream extends boolean = true>(options: TextActivityOptions<TAdapter, TSchema, TStream>): TextActivityOptions<TAdapter, TSchema, TStream>;
/**
 * Result type for the text activity.
 * - If outputSchema is provided: Promise<InferSchemaType<TSchema>>
 * - If stream is false: Promise<string>
 * - Otherwise (stream is true, default): AsyncIterable<StreamChunk>
 *
 * The TSchema branch is evaluated first, so a provided schema takes
 * precedence over the stream flag. NOTE(review): the conditional is written
 * over a naked type parameter, so it distributes over union TSchema types —
 * presumably intentional; confirm if TSchema is ever instantiated with a union.
 */
export type TextActivityResult<TSchema extends SchemaInput | undefined, TStream extends boolean = true> = TSchema extends SchemaInput ? Promise<InferSchemaType<TSchema>> : TStream extends false ? Promise<string> : AsyncIterable<StreamChunk>;
/**
 * Text activity - handles agentic text generation, one-shot text generation, and agentic structured output.
 *
 * This activity supports four modes:
 * 1. **Streaming agentic text**: Stream responses with automatic tool execution
 * 2. **Streaming one-shot text**: Simple streaming request/response without tools
 * 3. **Non-streaming text**: Returns collected text as a string (stream: false)
 * 4. **Agentic structured output**: Run tools, then return structured data
 *
 * @param options - Chat configuration; `adapter` is required, all other fields optional
 * @returns See {@link TextActivityResult}: a Promise of the parsed schema output
 *          when `outputSchema` is set, a Promise<string> when `stream` is false,
 *          otherwise an AsyncIterable<StreamChunk>
 *
 * @example Full agentic text (streaming with tools)
 * ```ts
 * import { chat } from '@tanstack/ai'
 * import { openaiText } from '@tanstack/ai-openai'
 *
 * for await (const chunk of chat({
 *   adapter: openaiText('gpt-4o'),
 *   messages: [{ role: 'user', content: 'What is the weather?' }],
 *   tools: [weatherTool]
 * })) {
 *   if (chunk.type === 'content') {
 *     console.log(chunk.delta)
 *   }
 * }
 * ```
 *
 * @example One-shot text (streaming without tools)
 * ```ts
 * for await (const chunk of chat({
 *   adapter: openaiText('gpt-4o'),
 *   messages: [{ role: 'user', content: 'Hello!' }]
 * })) {
 *   console.log(chunk)
 * }
 * ```
 *
 * @example Non-streaming text (stream: false)
 * ```ts
 * const text = await chat({
 *   adapter: openaiText('gpt-4o'),
 *   messages: [{ role: 'user', content: 'Hello!' }],
 *   stream: false
 * })
 * // text is a string with the full response
 * ```
 *
 * @example Agentic structured output (tools + structured response)
 * ```ts
 * import { z } from 'zod'
 *
 * const result = await chat({
 *   adapter: openaiText('gpt-4o'),
 *   messages: [{ role: 'user', content: 'Research and summarize the topic' }],
 *   tools: [researchTool, analyzeTool],
 *   outputSchema: z.object({
 *     summary: z.string(),
 *     keyPoints: z.array(z.string())
 *   })
 * })
 * // result is { summary: string, keyPoints: string[] }
 * ```
 */
export declare function chat<TAdapter extends AnyTextAdapter, TSchema extends SchemaInput | undefined = undefined, TStream extends boolean = true>(options: TextActivityOptions<TAdapter, TSchema, TStream>): TextActivityResult<TSchema, TStream>;
// Re-export adapter type definitions and the base adapter class so provider
// packages can implement custom text adapters without importing './adapter.js' directly.
export type { TextAdapter, TextAdapterConfig, StructuredOutputOptions, StructuredOutputResult, } from './adapter.js';
export { BaseTextAdapter } from './adapter.js';