axiom
Version:
The Axiom AI SDK provides an API to wrap your AI calls with observability instrumentation, offline evals, and online evals.
396 lines (381 loc) • 16.9 kB
TypeScript
import { Tracer, Span } from '@opentelemetry/api';
import { LanguageModelV1 } from '@ai-sdk/providerv1';
import { LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/providerv2';
import { LanguageModelV3, LanguageModelV3Middleware } from '@ai-sdk/providerv3';
import { V as ValidateName } from './name-validation.d-BKPGh6r3.js';
import { onlineEval as onlineEval$1 } from './evals/online.js';
import { LanguageModelV1Middleware } from 'aiv4';
export { c as createAppScope } from './app-scope-BgNUnFZY.js';
import * as _sinclair_typebox from '@sinclair/typebox';
import { TSchema as TSchema$1, Static, Type as Type$1 } from '@sinclair/typebox';
import './scorers/aggregations.js';
import 'zod';
/** Whether chat message content is recorded on spans: 'full' captures it, 'off' redacts it. */
type CaptureMessageContent = 'full' | 'off';
/**
 * Policy controlling what potentially sensitive data the AI instrumentation
 * records on OpenTelemetry spans. See {@link RedactionPolicy} for presets.
 */
type AxiomAIRedactionPolicy = {
  /** Controls whether message content appears on chat spans. */
  captureMessageContent?: CaptureMessageContent;
  /** When true, tool payloads are duplicated onto tool spans for easier querying. */
  mirrorToolPayloadOnToolSpan?: boolean;
};
/**
 * Preset {@link AxiomAIRedactionPolicy} values: `AxiomDefault` favors
 * observability (capture everything), `OpenTelemetryDefault` favors privacy
 * (redact message content and tool payloads).
 */
declare const RedactionPolicy: {
  /**
   * Includes message content on chat spans, and mirrors tool
   * payload on tool spans for more convenient querying.
   */
  readonly AxiomDefault: {
    readonly captureMessageContent: "full";
    readonly mirrorToolPayloadOnToolSpan: true;
  };
  /**
   * Redacts message content on chat spans, and does not put
   * tool payload on tool spans.
   */
  readonly OpenTelemetryDefault: {
    readonly captureMessageContent: "off";
    readonly mirrorToolPayloadOnToolSpan: false;
  };
};
/**
 * Register this in your `instrumentation.ts` to set up axiom.
 * This function stores the tracer's scope information globally to enable Context Propagation
 * and Instrumentation Scope. The tracer will be available across all execution contexts including Next.js.
 *
 * This function is idempotent - calling it multiple times with the same scope has no additional cost.
 *
 * @param config
 * @param config.tracer - The tracer that you are using in your application.
 * @param config.redactionPolicy - Optional redaction policy to control what data is captured in spans.
 *   Defaults are applied when omitted; see {@link RedactionPolicy} for presets.
 */
declare function initAxiomAI(config: {
  tracer: Tracer;
  redactionPolicy?: AxiomAIRedactionPolicy;
}): void;
/**
 * Get a tracer using the globally stored scope information
 * (as registered by {@link initAxiomAI}).
 * Fall back to package.json defaults if not set.
 *
 * @returns The globally configured OpenTelemetry tracer.
 */
declare function getGlobalTracer(): Tracer;
/**
 * Reset AxiomAI configuration (useful for testing).
 * Clears the global state set by {@link initAxiomAI}.
 */
declare function resetAxiomAI(): void;
/**
 * Wraps an AI SDK model to provide OpenTelemetry instrumentation.
 *
 * Supports AI SDK v4 (LanguageModelV1), v5 (LanguageModelV2) and v6 (LanguageModelV3) models.
 *
 * @param model - Language model implementing LanguageModelV1, LanguageModelV2 or LanguageModelV3 interface
 * @returns Wrapped model with identical interface but added instrumentation
 */
declare function wrapAISDKModel<T extends LanguageModelV1 | LanguageModelV2 | LanguageModelV3>(model: T): T;
/**
 * Metadata for categorizing and tracking spans within the AI application.
 * Passed as the first argument to {@link withSpan}.
 */
type WithSpanMeta = {
  /** High-level capability being performed (e.g., 'text_generation', 'chat', 'analysis') */
  capability: string;
  /** Specific step within the capability (e.g., 'generate_response', 'summarize', 'extract') */
  step: string;
  /** Optional conversation ID to correlate spans across a multi-turn conversation */
  conversationId?: string;
};
/**
 * Options for withSpan configuration.
 */
type WithSpanOptions = {
  /** Custom OpenTelemetry tracer instance */
  tracer?: Tracer;
  /** Timeout for abandoned streams (default: 600,000ms / 10 minutes) */
  timeoutMs?: number;
  /** Redaction policy to override global policy for this span */
  redactionPolicy?: AxiomAIRedactionPolicy;
};
/**
 * Wrap Vercel AI SDK functions like `generateText` and `streamText` in an OpenTelemetry span.
 *
 * Automatically detects and handles different response types:
 * - **Response streams**: Keeps span alive during entire stream consumption
 * - **Streaming objects**: Warns about incorrect usage patterns
 * - **Regular objects**: Ends span immediately after function completion
 *
 * The span name will be updated by the AI SDK middleware from 'chat'
 * to a model-specific name like 'chat gpt-4o-mini' when used with wrapped models.
 *
 * @param meta - Span metadata for categorization and tracking
 * @param meta.capability - High-level capability being performed (e.g., 'customer_support', 'meeting_summarizer')
 * @param meta.step - Specific step within the capability (e.g., 'categorize_message', 'transcribe_audio')
 * @param fn - Function to execute within the span context. Receives the span as a parameter so you can add additional attributes.
 * @param opts - Optional configuration
 * @param opts.tracer - Custom OpenTelemetry tracer instance. Defaults to the tracer provided by `initAxiomAI`.
 * @param opts.timeoutMs - Timeout for abandoned streams. Defaults to 600,000 (10 minutes).
 * @param opts.redactionPolicy - Optional redaction policy to override global policy for this span.
 *
 * @returns Promise that resolves to the same value as the wrapped function
 *
 * @example
 * // Non-streaming usage
 * const result = await withSpan(
 *   { capability: 'text_generation', step: 'generate' },
 *   async (span) => {
 *     span.setAttribute('user.id', userId); // can set attributes on the span
 *     const result = await generateObject({ model, prompt });
 *     // can do something with the result here, eg set additional attributes
 *     return result
 *   }
 * );
 *
 * @example
 * // Streaming usage with `@ai-sdk/react` in the frontend
 * ```ts
 * const response = withSpan(
 *   { capability: 'chat', step: 'stream_chat' },
 *   async (span) => {
 *     span.setAttribute('user.id', userId);
 *     const result = streamText({ model, messages });
 *     return result.toUIMessageStreamResponse();
 *   }
 * );
 * ```
 *
 * @example
 * // Streaming usage with express
 * ```ts
 * await withSpan(
 *   { capability: 'chat', step: 'stream_chat' },
 *   async (span) => {
 *     span.setAttribute('user.id', userId);
 *
 *     const { textStream } = streamText({ model, messages });
 *
 *     // Keep span open during entire stream consumption
 *     for await (const chunk of textStream) {
 *       res.write(chunk);
 *     }
 *   }
 * );
 * res.end();
 * ```
 */
declare function withSpan<Return, Capability extends string = string, Step extends string = string>(meta: WithSpanMeta & {
  capability: ValidateName<Capability>;
  step: ValidateName<Step>;
}, fn: (span: Span) => Promise<Return>, opts?: WithSpanOptions): Promise<Return>;
/**
 * Minimal structural shape of an AI SDK tool that can be wrapped for tracing.
 * Only `execute` is intercepted by {@link wrapTool}; everything else passes through.
 */
interface ToolLike {
  /** Tool implementation; wrapping replaces this with a span-creating version. */
  execute?: (...args: any[]) => any;
  /** Human-readable description of the tool. */
  description?: string;
  /** Any additional tool-specific fields are preserved as-is. */
  [key: string]: any;
}
/**
 * Wraps a tool to create child spans when the tool's execute method is called.
 *
 * @param toolName The name of the tool (key from the tools object) - span name will be `execute_tool <toolName>`
 * @param tool The tool to wrap
 * @returns The same tool but with a wrapped execute method that creates spans
 */
declare function wrapTool<T extends ToolLike>(toolName: string, tool: T): T;
/**
 * Wraps multiple tools to create child spans when their execute methods are called.
 * Convenience form of {@link wrapTool} applied to every entry of the object.
 *
 * @param tools An object containing tools to wrap, keyed by tool name
 * @returns The same object with all tools wrapped
 */
declare function wrapTools<T extends Record<string, ToolLike>>(tools: T): T;
/**
 * Configuration for Axiom telemetry middleware.
 * NOTE(review): currently empty — appears reserved for future options; confirm before removing.
 */
interface AxiomTelemetryConfig {
}
/**
 * Creates Axiom telemetry middleware for LanguageModelV1 (AI SDK v4).
 */
declare function axiomAIMiddlewareV1(): LanguageModelV1Middleware;
/**
 * Creates unified Axiom telemetry middleware that works with V1, V2 and V3 models.
 * The overload resolved is determined by the version of the `model` you pass,
 * so the returned middleware matches your model's interface.
 */
declare function axiomAIMiddleware(config: {
  model: LanguageModelV1;
}): LanguageModelV1Middleware;
declare function axiomAIMiddleware(config: {
  model: LanguageModelV2;
}): LanguageModelV2Middleware;
declare function axiomAIMiddleware(config: {
  model: LanguageModelV3;
}): LanguageModelV3Middleware;
/**
 * Creates Axiom telemetry middleware for LanguageModelV2 (AI SDK v5).
 */
declare function axiomAIMiddlewareV2(): LanguageModelV2Middleware;
/**
 * Creates Axiom telemetry middleware for LanguageModelV3 (AI SDK v6).
 */
declare function axiomAIMiddlewareV3(): LanguageModelV3Middleware;
/**
 * Template-friendly TypeBox types for use with Handlebars.
 *
 * These types are designed to be easily serializable and work well in template contexts
 * where data needs to be rendered as strings or used in conditionals and loops.
 *
 * All composite types (Array, Object, etc.) only accept other Template types.
 */
// Unique brand symbol: distinguishes template-safe schemas from arbitrary TypeBox schemas.
declare const SchemaBrand: unique symbol;
// A TypeBox schema branded as template-safe. The brand exists only at the type
// level; it prevents plain TypeBox schemas from being passed where template
// schemas are required.
type TSchema<T extends TSchema$1 = TSchema$1> = T & {
  [SchemaBrand]: true;
};
/**
 * Utility type to infer TypeScript types from {@link TSchema}.
 */
type InferSchema<T extends TSchema> = Static<T>;
/**
 * Utility type to infer context types from {@link Prompt} arguments.
 *
 * Used with {@link parse} function to ensure type safety when providing context values.
 */
type InferContext<T extends Record<string, TSchema>> = Static<ReturnType<typeof createObject<T>>>;
// Drops the first element of a tuple type; used below to forward every
// Type.Object parameter after `properties`.
type OmitFirst<T extends any[]> = T extends [any, ...infer R] ? R : never;
// Builds a branded object schema from a record of template schemas, forwarding
// any extra TypeBox Object() options.
declare const createObject: <T extends Record<string, TSchema>>(properties: T, ...args: OmitFirst<Parameters<typeof Type$1.Object>>) => TSchema<_sinclair_typebox.TObject<T>>;
/**
 * Template-safe subset of TypeBox's `Type` builder. Each factory returns a
 * branded {@link TSchema}, so only schemas built here compose with the other
 * template types. Composite factories (Array, Object, Record, Tuple, Union)
 * accept only other template schemas.
 */
declare const Type: {
  readonly String: (options?: _sinclair_typebox.StringOptions | undefined) => TSchema<_sinclair_typebox.TString>;
  readonly Number: (options?: _sinclair_typebox.NumberOptions | undefined) => TSchema<_sinclair_typebox.TNumber>;
  readonly Integer: (options?: _sinclair_typebox.IntegerOptions | undefined) => TSchema<_sinclair_typebox.TInteger>;
  readonly Boolean: (options?: _sinclair_typebox.SchemaOptions | undefined) => TSchema<_sinclair_typebox.TBoolean>;
  readonly Null: (options?: _sinclair_typebox.SchemaOptions | undefined) => TSchema<_sinclair_typebox.TNull>;
  readonly Undefined: (options?: _sinclair_typebox.SchemaOptions | undefined) => TSchema<_sinclair_typebox.TUndefined>;
  readonly Literal: <T extends string | number | boolean>(value: T, options?: _sinclair_typebox.SchemaOptions | undefined) => TSchema<_sinclair_typebox.TLiteral<T>>;
  readonly Array: <T extends TSchema>(items: T, options?: _sinclair_typebox.ArrayOptions | undefined) => TSchema<_sinclair_typebox.TArray<T>>;
  readonly Object: <T extends Record<string, TSchema>>(properties: T, options?: _sinclair_typebox.ObjectOptions | undefined) => TSchema<_sinclair_typebox.TObject<T>>;
  readonly Record: <V extends TSchema>(value: V, options?: _sinclair_typebox.ObjectOptions | undefined) => TSchema<_sinclair_typebox.TRecord<_sinclair_typebox.TString, V>>;
  readonly Tuple: <T extends TSchema[]>(types: [...T], options?: _sinclair_typebox.SchemaOptions | undefined) => TSchema<_sinclair_typebox.TTuple<T>>;
  readonly Optional: <T extends TSchema>(schema: T) => TSchema<T extends _sinclair_typebox.TOptional<infer S extends TSchema$1> ? _sinclair_typebox.TOptional<S> : _sinclair_typebox.Ensure<_sinclair_typebox.TOptional<T>>>;
  // Fixed: TypeBox's union *type* is `TUnion` (the value export `Union` is the
  // builder function, not a type) — consistent with TString/TArray/TTuple above.
  readonly Union: <T extends TSchema[]>(schemas: [...T], options?: _sinclair_typebox.SchemaOptions | undefined) => TSchema<_sinclair_typebox.TUnion<T>>;
};
/**
 * Configuration options for language model generation.
 * All fields are optional; omitted fields fall back to provider defaults.
 *
 * @experimental This API is experimental and may change in future versions.
 */
type ModelParams = {
  /** Maximum number of tokens to generate */
  maxOutputTokens?: number;
  /** Controls randomness in generation (0.0 to 2.0) */
  temperature?: number;
  /** Controls nucleus sampling for token selection */
  topP?: number;
  /** Controls top-k sampling for token selection */
  topK?: number;
  /** Penalty for repeating content (presence) */
  presencePenalty?: number;
  /** Penalty for repeating tokens (frequency) */
  frequencyPenalty?: number;
  /** Sequences that will stop generation */
  stopSequences?: string[];
  /** Seed for deterministic generation */
  seed?: number;
  /** Maximum number of retry attempts */
  maxRetries?: number;
};
// A single message in a prompt conversation. (The `$1` suffix is a bundler
// rename; the public alias is `Prompt['messages'][number]`.)
type PromptMessage$1 = {
  /** Originator of the message. */
  role: 'system' | 'user' | 'assistant' | 'tool';
  /** Message text; may contain Handlebars expressions resolved by {@link parse}. */
  content: string;
};
/**
 * Complete prompt definition with all metadata and versioning information.
 *
 * Extended version of {@link PromptInput} with additional versioning and identification.
 * Used with {@link parse} to process templates and generate {@link ParsedMessagesArray}.
 *
 * @experimental This API is experimental and may change in future versions.
 */
type Prompt = {
  /** Human-readable name for the prompt */
  name: string;
  /** Immutable user-defined identifier for the prompt */
  slug: string;
  /** Array of messages that make up the conversation */
  messages: PromptMessage$1[];
  /** The language model to use for this prompt */
  model: string;
  /** Optional generation parameters */
  options?: ModelParams;
  /** {@link TSchema} format arguments for API communication */
  arguments: Record<string, TSchema>;
  /** Optional description of the prompt's purpose */
  description?: string;
  /** Version identifier for the prompt */
  version: string;
  /** Optional unique identifier for the prompt */
  promptId?: string;
};
/**
 * Metadata structure for Axiom prompt tracking and instrumentation.
 *
 * @experimental This API is experimental and may change in future versions.
 */
interface AxiomPromptMetadata {
  /** Unique identifier for the prompt */
  id?: string;
  /** Human-readable name for the prompt */
  name?: string;
  /** Immutable user-defined identifier for the prompt */
  slug?: string;
  /** Version identifier for the prompt */
  version?: string;
}
/** Public alias for a single prompt message. */
type PromptMessage = Prompt['messages'][number];
/**
 * A message that has been processed and attached metadata.
 *
 * Extends the base message type with {@link AxiomPromptMetadata} for instrumentation tracking.
 *
 * @experimental This API is experimental and may change in future versions.
 */
interface ParsedMessage extends PromptMessage {
  /** Provider options that may include Axiom metadata for propagation */
  providerOptions?: {
    /** Internal {@link AxiomPromptMetadata} for prompt tracking */
    _axiomMeta?: AxiomPromptMetadata;
    [key: string]: any;
  };
  /** Provider metadata that may include Axiom metadata */
  providerMetadata?: {
    /** Internal {@link AxiomPromptMetadata} for prompt tracking */
    _axiomMeta?: AxiomPromptMetadata;
    [key: string]: any;
  };
}
// Intersects object-typed messages with ParsedMessage; non-objects pass through.
type ExtendMessage<T> = T extends object ? T & ParsedMessage : T;
// Maps ExtendMessage over every element of a message tuple, preserving length.
type ExtendedMessages<T extends readonly unknown[]> = {
  [K in keyof T]: ExtendMessage<T[K]>;
};
/**
 * Array of parsed messages with attached Axiom metadata for prompt tracking.
 *
 * Returned by {@link parse} function and contains {@link ParsedMessage} items with
 * accessible {@link AxiomPromptMetadata} via proxy.
 *
 * @experimental This API is experimental and may change in future versions.
 */
type ParsedMessagesArray<T extends readonly PromptMessage[] = readonly PromptMessage[]> = ExtendedMessages<T> & {
  /** {@link AxiomPromptMetadata} accessible via proxy for prompt tracking */
  _axiomMeta: AxiomPromptMetadata;
};
/**
 * Parses a prompt template by replacing variables with provided context values.
 *
 * This function processes Handlebars templates in prompt messages and attaches metadata
 * for instrumentation and tracking.
 *
 * @experimental This API is experimental and may change in future versions.
 *
 * @param prompt - The {@link Prompt} template to parse
 * @param options - Parsing options
 * @param options.context - Context values to substitute into the template;
 *   its type is derived from the prompt's `arguments` via {@link InferContext}
 * @returns Promise that resolves to the parsed prompt with processed messages
 */
declare const parse: <TPrompt extends Prompt, TMessages extends TPrompt["messages"] = TPrompt["messages"]>(prompt: TPrompt & {
  messages: TMessages;
}, { context, }: {
  context: InferContext<TPrompt["arguments"]>;
}) => Promise<Omit<TPrompt, "messages"> & {
  messages: ParsedMessagesArray<TMessages>;
}>;
/**
 * Instrumentation
 */
/** @deprecated Import from 'axiom/ai/evals/online' instead. */
declare const onlineEval: typeof onlineEval$1;
// Experimental APIs are exported under an `experimental_` prefix; note that
// `Type` is exported under two names (`experimental_Template` and `experimental_Type`).
export { type AxiomAIRedactionPolicy, type AxiomTelemetryConfig, RedactionPolicy, type WithSpanOptions, axiomAIMiddleware, axiomAIMiddlewareV1, axiomAIMiddlewareV2, axiomAIMiddlewareV3, type AxiomPromptMetadata as experimental_AxiomPromptMetadata, type InferContext as experimental_InferContext, type InferSchema as experimental_InferSchema, type ParsedMessage as experimental_ParsedMessage, type ParsedMessagesArray as experimental_ParsedMessagesArray, type Prompt as experimental_Prompt, type TSchema as experimental_TSchema, Type as experimental_Template, Type as experimental_Type, parse as experimental_parse, getGlobalTracer, initAxiomAI, onlineEval, resetAxiomAI, withSpan, wrapAISDKModel, wrapTool, wrapTools };