
art-framework


Agent Runtime (ART) Framework - A browser-first JavaScript/TypeScript framework for building LLM-powered agentic AI applications, with native support for the MCP and A2A protocols

import { z } from 'zod'; type UnsubscribeFunction = () => void; interface Subscription<DataType, FilterType> { id: string; callback: (data: DataType) => void; filter?: FilterType; options?: { threadId?: string; }; } /** * A generic class for implementing a publish/subscribe pattern with filtering capabilities. * Designed for decoupling components, particularly UI updates from backend events. */ declare class TypedSocket<DataType, FilterType = any> { protected subscriptions: Map<string, Subscription<DataType, FilterType>>; constructor(); /** * Subscribes a callback function to receive notifications. * @param callback - The function to call when new data is notified. * @param filter - An optional filter to only receive specific types of data. * @param options - Optional configuration, like a threadId for filtering. * @returns An unsubscribe function. */ subscribe(callback: (data: DataType) => void, filter?: FilterType, options?: { threadId?: string; }): UnsubscribeFunction; /** * Notifies all relevant subscribers with new data. * @param data - The data payload to send to subscribers. * @param options - Optional targeting options (e.g., targetThreadId). * @param filterCheck - A function to check if a subscription's filter matches the data. */ notify(data: DataType, options?: { targetThreadId?: string; targetSessionId?: string; }, // targetSessionId might be useful later filterCheck?: (data: DataType, filter?: FilterType) => boolean): void; /** * Optional: Retrieves historical data. This base implementation is empty. * Subclasses might implement this by interacting with repositories. */ getHistory?(_filter?: FilterType, _options?: { threadId?: string; limit?: number; }): Promise<DataType[]>; /** * Clears all subscriptions. Useful for cleanup. */ clearAllSubscriptions(): void; } /** * Entry defining an available provider adapter. * * @interface AvailableProviderEntry */ interface AvailableProviderEntry { /** * Unique key, e.g., 'openai', 'anthropic', 'ollama_local'. * @property {string} name */ name: string; /** * The adapter class. * @property {new (options: any) => ProviderAdapter} adapter */ adapter: new (options: any) => ProviderAdapter; /** * Optional base config (rarely needed if options are per-call). * @property {any} [baseOptions] */ baseOptions?: any; /** * Default: false. Determines singleton vs. pooling behavior. * @property {boolean} [isLocal] */ isLocal?: boolean; } /** * Configuration for the ProviderManager passed during ART initialization. * * @interface ProviderManagerConfig */ interface ProviderManagerConfig { /** * @property {AvailableProviderEntry[]} availableProviders */ availableProviders: AvailableProviderEntry[]; /** * Max concurrent ACTIVE instances per API-based provider NAME. Default: 5. * @property {number} [maxParallelApiInstancesPerProvider] */ maxParallelApiInstancesPerProvider?: number; /** * Time in seconds an API adapter instance can be idle before being eligible for removal. Default: 300. * @property {number} [apiInstanceIdleTimeoutSeconds] */ apiInstanceIdleTimeoutSeconds?: number; } /** * Configuration passed AT RUNTIME for a specific LLM call. * * @interface RuntimeProviderConfig */ interface RuntimeProviderConfig { /** * Must match a name in AvailableProviderEntry. * @property {string} providerName */ providerName: string; /** * Specific model identifier (e.g., 'gpt-4o', 'llama3:latest'). * @property {string} modelId */ modelId: string; /** * Specific options for THIS instance (apiKey, temperature, contextSize, baseUrl, etc.). 
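// --- Illustrative usage sketch (editorial addition, not part of the declarations) ---
// Shows how a concrete TypedSocket subclass might publish filtered events.
// Assumes TypedSocket is exported from the 'art-framework' package root; the
// Notification type and NotificationSocket class are hypothetical.
import { TypedSocket } from 'art-framework';

type Notification = { kind: 'info' | 'error'; text: string };

class NotificationSocket extends TypedSocket<Notification, Notification['kind']> {}

const socket = new NotificationSocket();

// Subscribe only to 'error' notifications for a specific thread.
const unsubscribe = socket.subscribe(
  (n) => console.log(n.text),
  'error',
  { threadId: 'thread-1' },
);

// Notify, supplying a filterCheck so the subscription filter is honoured.
socket.notify(
  { kind: 'error', text: 'Something failed' },
  { targetThreadId: 'thread-1' },
  (data, filter) => filter === undefined || data.kind === filter,
);

unsubscribe();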
* @property {any} adapterOptions */ adapterOptions: any; } /** * Object returned by ProviderManager granting access to an adapter instance. * * @interface ManagedAdapterAccessor */ interface ManagedAdapterAccessor { /** * The ready-to-use adapter instance. * @property {ProviderAdapter} adapter */ adapter: ProviderAdapter; /** * Signals that the current call using this adapter instance is finished. * @property {() => void} release */ release: () => void; } /** * Interface for the ProviderManager. * * @interface IProviderManager */ interface IProviderManager { /** * Returns identifiers for all registered potential providers. * @returns {string[]} */ getAvailableProviders(): string[]; /** * Gets a managed adapter instance based on the runtime config. * * @remarks * Handles instance creation, caching, pooling limits, and singleton constraints. * May queue requests or throw errors based on concurrency limits. * * @param {RuntimeProviderConfig} config * @returns {Promise<ManagedAdapterAccessor>} */ getAdapter(config: RuntimeProviderConfig): Promise<ManagedAdapterAccessor>; } /** * @module utils/logger * Provides a simple, static, and configurable logger for the ART framework. * It supports different log levels and can be enabled or disabled globally. */ /** * Defines the available logging levels, ordered from most verbose to least verbose. * * @enum {number} */ declare enum LogLevel { /** Detailed debugging information, useful for development. */ DEBUG = 0, /** General informational messages about application flow. */ INFO = 1, /** Potential issues or unexpected situations that don't prevent execution. */ WARN = 2, /** Errors that indicate a failure or problem. */ ERROR = 3 } /** * Configuration options for the static Logger class. * * @interface LoggerConfig */ interface LoggerConfig { /** * The minimum log level to output messages for. Messages below this level will be ignored. * @property {LogLevel} level */ level: LogLevel; /** * An optional prefix string to prepend to all log messages (e.g., '[MyApp]'). Defaults to '[ART]'. * @property {string} [prefix] */ prefix?: string; } /** * A simple static logger class for outputting messages to the console at different levels. * * @remarks * Configuration is global via the static `configure` method. * * @class Logger */ declare class Logger { private static config; /** * Configures the static logger settings. * * @param config A partial `LoggerConfig` object. Provided settings will override defaults. */ static configure(config: Partial<LoggerConfig>): void; /** * Logs a message at the DEBUG level. * * @remarks * Only outputs if the configured log level is DEBUG. * * @param message The main log message string. * @param args Additional arguments to include in the console output (e.g., objects, arrays). */ static debug(message: string, ...args: any[]): void; /** * Logs a message at the INFO level. * * @remarks * Outputs if the configured log level is INFO or DEBUG. * * @param message The main log message string. * @param args Additional arguments to include in the console output. */ static info(message: string, ...args: any[]): void; /** * Logs a message at the WARN level. * * @remarks * Outputs if the configured log level is WARN, INFO, or DEBUG. * * @param message The main log message string. * @param args Additional arguments to include in the console output. */ static warn(message: string, ...args: any[]): void; /** * Logs a message at the ERROR level. * * @remarks * Outputs if the configured log level is ERROR, WARN, INFO, or DEBUG. 
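// --- Illustrative usage sketch (editorial addition) ---
// Configuring the static Logger and borrowing a managed adapter for a single call.
// Assumes Logger, LogLevel and the IProviderManager/RuntimeProviderConfig types are
// exported from 'art-framework'; the providerManager argument and placeholder API
// key are hypothetical.
import { Logger, LogLevel } from 'art-framework';
import type { IProviderManager, RuntimeProviderConfig } from 'art-framework';

Logger.configure({ level: LogLevel.INFO, prefix: '[MyApp]' });

async function callOnce(providerManager: IProviderManager): Promise<void> {
  const config: RuntimeProviderConfig = {
    providerName: 'openai',
    modelId: 'gpt-4o',
    adapterOptions: { apiKey: 'YOUR_API_KEY', temperature: 0.2 },
  };

  const accessor = await providerManager.getAdapter(config);
  try {
    Logger.debug('Adapter acquired', providerManager.getAvailableProviders());
    // ... use accessor.adapter for one LLM call ...
  } finally {
    // Release so the ProviderManager can reuse or evict this instance.
    accessor.release();
  }
}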
* * @param message The main log message string. * @param args Additional arguments to include in the console output (often an error object). */ static error(message: string, ...args: any[]): void; } /** * Defines standard error codes for the ART framework. * These codes categorize errors originating from different subsystems. */ declare enum ErrorCode { /** Invalid or malformed configuration provided. */ INVALID_CONFIG = "INVALID_CONFIG", /** A required API key was not provided. */ MISSING_API_KEY = "MISSING_API_KEY", /** General configuration-related error. */ CONFIGURATION_ERROR = "CONFIGURATION_ERROR", /** A generic error occurred in the storage layer. */ STORAGE_ERROR = "STORAGE_ERROR", /** The requested thread could not be found in storage. */ THREAD_NOT_FOUND = "THREAD_NOT_FOUND", /** Failed to save data to the storage layer. */ SAVE_FAILED = "SAVE_FAILED", /** An error occurred while communicating with the LLM provider. */ LLM_PROVIDER_ERROR = "LLM_PROVIDER_ERROR", /** Failed to generate a prompt. */ PROMPT_GENERATION_FAILED = "PROMPT_GENERATION_FAILED", /** Failed to parse the output from the LLM. */ OUTPUT_PARSING_FAILED = "OUTPUT_PARSING_FAILED", /** Error during prompt template rendering or initial structure creation. */ PROMPT_ASSEMBLY_FAILED = "PROMPT_ASSEMBLY_FAILED", /** The requested prompt fragment does not exist. */ PROMPT_FRAGMENT_NOT_FOUND = "PROMPT_FRAGMENT_NOT_FOUND", /** The constructed prompt object failed schema validation. */ PROMPT_VALIDATION_FAILED = "PROMPT_VALIDATION_FAILED", /** Failed to translate the ART standard prompt to a provider-specific format. */ PROMPT_TRANSLATION_FAILED = "PROMPT_TRANSLATION_FAILED", /** The requested tool could not be found in the registry. */ TOOL_NOT_FOUND = "TOOL_NOT_FOUND", /** The provided tool schema failed validation. */ TOOL_SCHEMA_VALIDATION_FAILED = "TOOL_SCHEMA_VALIDATION_FAILED", /** A generic error occurred during tool execution. */ TOOL_EXECUTION_ERROR = "TOOL_EXECUTION_ERROR", /** The requested tool is disabled for the current thread. */ TOOL_DISABLED = "TOOL_DISABLED", /** The planning phase of the agent failed. */ PLANNING_FAILED = "PLANNING_FAILED", /** An error occurred within the ToolSystem execution loop. */ TOOL_EXECUTION_FAILED = "TOOL_EXECUTION_FAILED", /** The synthesis phase of the agent failed. */ SYNTHESIS_FAILED = "SYNTHESIS_FAILED", /** A general error occurred during the agent's process method. */ AGENT_PROCESSING_ERROR = "AGENT_PROCESSING_ERROR", /** An A2A (Agent-to-Agent) task delegation failed. */ DELEGATION_FAILED = "DELEGATION_FAILED", /** A network error occurred. */ NETWORK_ERROR = "NETWORK_ERROR", /** An operation timed out. */ TIMEOUT_ERROR = "TIMEOUT_ERROR", /** An operation timed out (duplicate of TIMEOUT_ERROR). */ TIMEOUT = "TIMEOUT", /** An error occurred with an external service. */ EXTERNAL_SERVICE_ERROR = "EXTERNAL_SERVICE_ERROR", /** The requested task was not found. */ TASK_NOT_FOUND = "TASK_NOT_FOUND", /** Input data failed validation. */ VALIDATION_ERROR = "VALIDATION_ERROR", /** The request was invalid or malformed. */ INVALID_REQUEST = "INVALID_REQUEST", /** A task with the same ID already exists. */ DUPLICATE_TASK_ID = "DUPLICATE_TASK_ID", /** A generic error occurred in a repository. */ REPOSITORY_ERROR = "REPOSITORY_ERROR", /** A connection is already established. */ ALREADY_CONNECTED = "ALREADY_CONNECTED", /** A required configuration is missing. */ MISSING_CONFIG = "MISSING_CONFIG", /** The requested feature is not implemented. 
*/ NOT_IMPLEMENTED = "NOT_IMPLEMENTED", /** No active connection is available. */ NOT_CONNECTED = "NOT_CONNECTED", /** The request timed out. */ REQUEST_TIMEOUT = "REQUEST_TIMEOUT", /** Standard input is not available. */ NO_STDIN = "NO_STDIN", /** The provided URL is not an HTTP/HTTPS URL. */ NO_HTTP_URL = "NO_HTTP_URL", /** An HTTP error occurred. */ HTTP_ERROR = "HTTP_ERROR", /** The requested server was not found. */ SERVER_NOT_FOUND = "SERVER_NOT_FOUND", /** A health check for a service failed. */ HEALTH_CHECK_FAILED = "HEALTH_CHECK_FAILED", /** Failed to discover tools from a remote source. */ TOOL_DISCOVERY_FAILED = "TOOL_DISCOVERY_FAILED", /** The requested transport protocol is not supported. */ UNSUPPORTED_TRANSPORT = "UNSUPPORTED_TRANSPORT", /** A CORS browser extension is required to proceed. */ CORS_EXTENSION_REQUIRED = "CORS_EXTENSION_REQUIRED", /** CORS permissions are required but have not been granted. */ CORS_PERMISSION_REQUIRED = "CORS_PERMISSION_REQUIRED", /** An unknown or unexpected error occurred. */ UNKNOWN_ERROR = "UNKNOWN_ERROR", /** The requested LLM provider is not known or configured. */ UNKNOWN_PROVIDER = "UNKNOWN_PROVIDER", /** Attempted to activate a local provider when another is already active. */ LOCAL_PROVIDER_CONFLICT = "LOCAL_PROVIDER_CONFLICT", /** The requested local LLM instance is currently busy. */ LOCAL_INSTANCE_BUSY = "LOCAL_INSTANCE_BUSY", /** Timeout waiting for an available instance of an API provider. */ API_QUEUE_TIMEOUT = "API_QUEUE_TIMEOUT", /** Failed to instantiate an adapter for a provider. */ ADAPTER_INSTANTIATION_ERROR = "ADAPTER_INSTANTIATION_ERROR" } /** * Custom error class for ART framework specific errors. * It includes an error code, an optional original error for chaining, * and a details object for additional context. */ declare class ARTError extends Error { /** The specific error code from the ErrorCode enum. */ readonly code: ErrorCode; /** The original error that caused this error, if any. */ readonly originalError?: Error; /** A record of additional details about the error. */ details: Record<string, any>; /** * Creates an instance of ARTError. * @param {string} message - The error message. * @param {ErrorCode} code - The error code. * @param {Error} [originalError] - The original error, if any. * @param {Record<string, any>} [details={}] - Additional details about the error. */ constructor(message: string, code: ErrorCode, originalError?: Error, details?: Record<string, any>); /** * Returns a string representation of the error, including the original error if present. * @returns {string} The string representation of the error. */ toString(): string; } /** * Error thrown when a requested LLM provider is not known or configured. */ declare class UnknownProviderError extends ARTError { constructor(providerName: string); } /** * Error thrown when attempting to activate a local provider while another is already active. */ declare class LocalProviderConflictError extends ARTError { constructor(requestedProvider: string, activeProvider: string); } /** * Error thrown when a requested local LLM instance is currently busy. */ declare class LocalInstanceBusyError extends ARTError { constructor(providerName: string, modelId: string); } /** * Error thrown when a timeout occurs while waiting for an available instance of an API provider. */ declare class ApiQueueTimeoutError extends ARTError { constructor(providerName: string); } /** * Error thrown when an adapter for a provider fails to instantiate. 
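// --- Illustrative usage sketch (editorial addition) ---
// Wrapping and inspecting framework errors with ARTError and ErrorCode.
// Assumes ARTError and ErrorCode are exported from 'art-framework'; callTool is a
// hypothetical placeholder for any operation that may fail.
import { ARTError, ErrorCode } from 'art-framework';

async function safeCallTool(callTool: () => Promise<unknown>): Promise<unknown> {
  try {
    return await callTool();
  } catch (err) {
    const wrapped = err instanceof ARTError
      ? err
      : new ARTError(
          'Tool execution failed',
          ErrorCode.TOOL_EXECUTION_ERROR,
          err instanceof Error ? err : undefined,
          { phase: 'execution' },
        );

    if (wrapped.code === ErrorCode.TOOL_NOT_FOUND) {
      // e.g. fall back to a default tool or surface a friendlier message
    }
    console.error(wrapped.toString());
    throw wrapped;
  }
}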
*/ declare class AdapterInstantiationError extends ARTError { constructor(providerName: string, originalError: Error); } type StreamEventTypeFilter = StreamEvent['type'] | Array<StreamEvent['type']>; /** * A dedicated socket for broadcasting LLM stream events (`StreamEvent`) to UI subscribers. * Extends the generic TypedSocket and implements filtering based on `StreamEvent.type`. */ declare class LLMStreamSocket extends TypedSocket<StreamEvent, StreamEventTypeFilter> { constructor(); /** * Notifies subscribers about a new LLM stream event. * Filters based on event type if a filter is provided during subscription. * @param event - The StreamEvent data. */ notifyStreamEvent(event: StreamEvent): void; } /** * @module types/schemas * This module defines Zod schemas for validating the core data structures of the ART framework, * ensuring type safety and data integrity at runtime. */ /** * Zod schema for validating a single {@link ArtStandardMessage} object. * * @remarks * This schema enforces the structural and type requirements for each message, including: * - A valid `role` from the {@link ArtStandardMessageRole} enum. * - `content` that matches the expected type for a given role (e.g., string for 'user', string or null for 'assistant'). * - The presence of `tool_call_id` for 'tool' or 'tool_result' roles. * - The structure of `tool_calls` when present in an 'assistant' message. * * It uses a `.refine()` method to implement context-aware validation based on the message's `role`. */ declare const ArtStandardMessageSchema: z.ZodEffects<z.ZodObject<{ role: z.ZodType<ArtStandardMessageRole, z.ZodTypeDef, ArtStandardMessageRole>; content: z.ZodUnion<[z.ZodString, z.ZodRecord<z.ZodString, z.ZodAny>, z.ZodNull]>; name: z.ZodOptional<z.ZodString>; tool_calls: z.ZodOptional<z.ZodArray<z.ZodObject<{ id: z.ZodString; type: z.ZodLiteral<"function">; function: z.ZodObject<{ name: z.ZodString; arguments: z.ZodString; }, "strip", z.ZodTypeAny, { name: string; arguments: string; }, { name: string; arguments: string; }>; }, "strip", z.ZodTypeAny, { function: { name: string; arguments: string; }; type: "function"; id: string; }, { function: { name: string; arguments: string; }; type: "function"; id: string; }>, "many">>; tool_call_id: z.ZodOptional<z.ZodString>; }, "strict", z.ZodTypeAny, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }>, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }>; /** * Zod schema for validating an entire {@link ArtStandardPrompt} (an array of messages). 
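// --- Illustrative usage sketch (editorial addition) ---
// Subscribing to TOKEN events on an LLMStreamSocket and validating a single message
// with the Zod schema above. Assumes LLMStreamSocket and ArtStandardMessageSchema are
// exported from 'art-framework'; in practice the socket instance would normally be
// supplied by the framework's UI system rather than constructed directly.
import { LLMStreamSocket, ArtStandardMessageSchema } from 'art-framework';

const streamSocket = new LLMStreamSocket();
let streamedText = '';

// Only TOKEN events are delivered; METADATA/ERROR/END are filtered out.
const stop = streamSocket.subscribe(
  (event) => { streamedText += String(event.data); },
  'TOKEN',
  { threadId: 'thread-1' },
);

// Runtime validation of a message before handing it to an adapter.
const parsed = ArtStandardMessageSchema.safeParse({
  role: 'user',
  content: 'What is the weather in Paris?',
});
if (!parsed.success) console.warn(parsed.error.issues);

stop();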
* * @remarks * This is a straightforward array schema that applies the {@link ArtStandardMessageSchema} to each element, * ensuring that every message in the prompt conforms to the required structure. */ declare const ArtStandardPromptSchema: z.ZodArray<z.ZodEffects<z.ZodObject<{ role: z.ZodType<ArtStandardMessageRole, z.ZodTypeDef, ArtStandardMessageRole>; content: z.ZodUnion<[z.ZodString, z.ZodRecord<z.ZodString, z.ZodAny>, z.ZodNull]>; name: z.ZodOptional<z.ZodString>; tool_calls: z.ZodOptional<z.ZodArray<z.ZodObject<{ id: z.ZodString; type: z.ZodLiteral<"function">; function: z.ZodObject<{ name: z.ZodString; arguments: z.ZodString; }, "strip", z.ZodTypeAny, { name: string; arguments: string; }, { name: string; arguments: string; }>; }, "strip", z.ZodTypeAny, { function: { name: string; arguments: string; }; type: "function"; id: string; }, { function: { name: string; arguments: string; }; type: "function"; id: string; }>, "many">>; tool_call_id: z.ZodOptional<z.ZodString>; }, "strict", z.ZodTypeAny, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }>, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }, { role: ArtStandardMessageRole; content: string | Record<string, any> | null; name?: string | undefined; tool_calls?: { function: { name: string; arguments: string; }; type: "function"; id: string; }[] | undefined; tool_call_id?: string | undefined; }>, "many">; /** * @module systems/mcp/types * This module defines the public and internal types used for configuring * and managing the state of the MCP (Multi-Capability Provider) system. */ /** * Defines the connection details for a streamable HTTP-based MCP server. * This is the primary transport mechanism for browser-based MCP communication. * * @interface StreamableHttpConnection */ interface StreamableHttpConnection { /** * The base URL of the MCP server. * @property {string} url */ url: string; /** * Optional headers to include in every request to the server. * @property {Record<string, string>} [headers] */ headers?: Record<string, string>; /** * The ID of an authentication strategy to use for this connection. * @property {string} [authStrategyId] */ authStrategyId?: string; /** * Optional OAuth configuration for automatic PKCE setup per server. * This enables secure, per-server authentication without manual token handling. * @property {object} [oauth] */ oauth?: { /** * The type of OAuth flow, currently supporting 'pkce'. * @property {'pkce'} type */ type: 'pkce'; /** * The OAuth 2.1 Authorization Endpoint URL. * @property {string} authorizationEndpoint */ authorizationEndpoint: string; /** * The OAuth 2.1 Token Endpoint URL. * @property {string} tokenEndpoint */ tokenEndpoint: string; /** * The public client ID for the OAuth application. * @property {string} clientId */ clientId: string; /** * A space-delimited string of OAuth scopes to request. 
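// --- Illustrative usage sketch (editorial addition) ---
// Validating an entire prompt (an array of messages) before translation to a
// provider-specific format. Assumes ArtStandardPromptSchema is exported from
// 'art-framework'; the message contents are placeholder examples.
import { ArtStandardPromptSchema } from 'art-framework';

const prompt = [
  { role: 'system', content: 'You are a concise assistant.' },
  { role: 'user', content: 'Summarise the available MCP transports.' },
];

const result = ArtStandardPromptSchema.safeParse(prompt);
if (!result.success) {
  // Each issue points at the offending message index and field.
  console.warn('Invalid prompt:', result.error.issues);
}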
* @property {string} scopes */ scopes: string; /** * The redirect URI that will handle the OAuth callback. * @property {string} redirectUri */ redirectUri: string; /** * Optional 'resource' parameter for OAuth 2.1, often used as an audience identifier. * @property {string} [resource] */ resource?: string; /** * Determines whether to open the login page in a new tab. * Defaults to true if omitted. * @property {boolean} [openInNewTab] */ openInNewTab?: boolean; /** * An optional BroadcastChannel name for delivering tokens, useful in multi-window scenarios. * @property {string} [channelName] */ channelName?: string; }; } /** * Defines the schema for a tool provided by an MCP server. * * @interface McpToolDefinition */ interface McpToolDefinition { /** * The name of the tool. * @property {string} name */ name: string; /** * A description of what the tool does. * @property {string} [description] */ description?: string; /** * The JSON schema for the tool's input. * @property {any} inputSchema */ inputSchema: any; /** * The JSON schema for the tool's output. * @property {any} [outputSchema] */ outputSchema?: any; } /** * Defines a static resource provided by an MCP server. * * @interface McpResource */ interface McpResource { /** * The URI of the resource. * @property {string} uri */ uri: string; /** * The name of the resource. * @property {string} name */ name: string; /** * The MIME type of the resource. * @property {string} [mimeType] */ mimeType?: string; /** * A description of the resource. * @property {string} [description] */ description?: string; } /** * Defines a template for a resource provided by an MCP server. * * @interface McpResourceTemplate */ interface McpResourceTemplate { /** * The URI template for the resource. * @property {string} uriTemplate */ uriTemplate: string; /** * The name of the resource template. * @property {string} name */ name: string; /** * A description of the resource template. * @property {string} [description] */ description?: string; /** * The MIME type of the resource. * @property {string} [mimeType] */ mimeType?: string; } /** * Represents the configuration for a single MCP server. * * @remarks * This is the format for each server entry in the `art_mcp_config.json` file. * It contains all the necessary information for discovering, installing, and connecting to an MCP server. * * @typedef {object} McpServerConfig */ type McpServerConfig = { /** * A unique identifier for the server. * @property {string} id */ id: string; /** * The transport type for the server, currently only 'streamable-http' is supported. * @property {'streamable-http'} type */ type: 'streamable-http'; /** * Whether the server is enabled and should be connected to. * @property {boolean} enabled */ enabled: boolean; /** * A user-friendly name for the server. * @property {string} [displayName] */ displayName?: string; /** * A description of the server and its capabilities. * @property {string} [description] */ description?: string; /** * The connection details for the server. * @see module:systems/mcp/types.StreamableHttpConnection */ connection: StreamableHttpConnection; /** * Information about how the server was installed (e.g., 'git', 'npm', 'manual'). * @property {object} [installation] */ installation?: { source: 'git' | 'npm' | 'manual'; [key: string]: any; }; /** * The timeout in milliseconds for requests to the server. * @property {number} [timeout] */ timeout?: number; /** * The tools provided by the server. 
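// --- Illustrative configuration sketch (editorial addition) ---
// A StreamableHttpConnection with automatic PKCE OAuth. Assumes the type is exported
// from 'art-framework'; all URLs, the client ID and the channel name are hypothetical
// placeholder values.
import type { StreamableHttpConnection } from 'art-framework';

const connection: StreamableHttpConnection = {
  url: 'https://mcp.example.com',
  headers: { 'X-Client': 'my-art-app' },
  oauth: {
    type: 'pkce',
    authorizationEndpoint: 'https://auth.example.com/oauth/authorize',
    tokenEndpoint: 'https://auth.example.com/oauth/token',
    clientId: 'my-public-client-id',
    scopes: 'mcp.read mcp.tools',
    redirectUri: 'https://app.example.com/oauth/callback',
    openInNewTab: true,       // defaults to true when omitted
    channelName: 'mcp-oauth', // optional BroadcastChannel for token delivery
  },
};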
* @property {McpToolDefinition[]} tools */ tools: McpToolDefinition[]; /** * The static resources provided by the server. * @property {McpResource[]} resources */ resources: McpResource[]; /** * The resource templates provided by the server. * @property {McpResourceTemplate[]} resourceTemplates */ resourceTemplates: McpResourceTemplate[]; }; /** * Represents the internal status of an MCP server connection. * This is not part of the public configuration. * * @interface McpServerStatus */ interface McpServerStatus { /** * The unique identifier for the server. * @property {string} id */ id: string; /** * The current connection status of the server. * @property {'connected' | 'disconnected' | 'error' | 'connecting'} status */ status: 'connected' | 'disconnected' | 'error' | 'connecting'; /** * The timestamp of the last successful connection. * @property {Date} [lastConnected] */ lastConnected?: Date; /** * The last error message received from the server. * @property {string} [lastError] */ lastError?: string; /** * The number of tools registered from this server. * @property {number} toolCount */ toolCount: number; } /** * Defines the configuration for the McpManager. * * @interface McpManagerConfig */ interface McpManagerConfig { /** * Whether to enable MCP functionality. Defaults to false. * @property {boolean} enabled */ enabled: boolean; /** * An optional endpoint URL for discovering MCP servers. * Defaults to the Zyntopia API if not provided. * @property {string} [discoveryEndpoint] */ discoveryEndpoint?: string; } /** * Represents the role of a message sender in a conversation. * * @enum {string} */ declare enum MessageRole { /** The end-user interacting with the agent. */ USER = "USER", /** The AI agent. */ AI = "AI", /** A system-level message providing context or instructions. */ SYSTEM = "SYSTEM", /** A message containing the result of a tool execution. */ TOOL = "TOOL" } /** * Represents a single message within a conversation thread. * * @interface ConversationMessage */ interface ConversationMessage { /** * A unique identifier for this specific message. * @property {string} messageId */ messageId: string; /** * The identifier of the conversation thread this message belongs to. * @property {string} threadId */ threadId: string; /** * The role of the sender (User, AI, System, or Tool). * @property {MessageRole} role */ role: MessageRole; /** * The textual content of the message. * @property {string} content */ content: string; /** * A Unix timestamp (in milliseconds) indicating when the message was created. * @property {number} timestamp */ timestamp: number; /** * Optional metadata associated with the message (e.g., related observation IDs, tool call info, UI state). * @property {Record<string, any>} [metadata] */ metadata?: Record<string, any>; } /** * Represents the type of an observation record, capturing significant events during agent execution. * * @enum {string} */ declare enum ObservationType { /** The user's inferred intent. */ INTENT = "INTENT", /** The generated concise thread title. */ TITLE = "TITLE", /** The agent's step-by-step plan to address the intent. */ PLAN = "PLAN", /** The agent's internal monologue or reasoning process. */ THOUGHTS = "THOUGHTS", /** Records the LLM's decision to call one or more tools (part of the plan). */ TOOL_CALL = "TOOL_CALL", /** Records the actual execution attempt and result of a specific tool call. */ TOOL_EXECUTION = "TOOL_EXECUTION", /** Records events specifically related to the synthesis phase (e.g., the LLM call). 
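// --- Illustrative configuration sketch (editorial addition) ---
// One server entry in the shape described by McpServerConfig (the per-server format
// in art_mcp_config.json). Assumes the type is exported from 'art-framework'; the
// server id, URL and tool definition are hypothetical examples.
import type { McpServerConfig } from 'art-framework';

const weatherServer: McpServerConfig = {
  id: 'weather-mcp',
  type: 'streamable-http',
  enabled: true,
  displayName: 'Weather MCP Server',
  description: 'Provides current weather lookups.',
  connection: { url: 'https://weather-mcp.example.com' },
  installation: { source: 'manual' },
  timeout: 15000, // ms
  tools: [
    {
      name: 'get_weather',
      description: 'Get current weather for a city.',
      inputSchema: {
        type: 'object',
        properties: { city: { type: 'string', description: 'City name' } },
        required: ['city'],
      },
    },
  ],
  resources: [],
  resourceTemplates: [],
};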
*/ SYNTHESIS = "SYNTHESIS", /** Records an error encountered during any phase of execution. */ ERROR = "ERROR", /** Records the final AI response message generated by the agent. */ FINAL_RESPONSE = "FINAL_RESPONSE", /** Records changes made to the agent's persistent state. */ STATE_UPDATE = "STATE_UPDATE", /** Logged by Agent Core when LLM stream consumption begins. */ LLM_STREAM_START = "LLM_STREAM_START", /** Logged by Agent Core upon receiving a METADATA stream event. Content should be LLMMetadata. */ LLM_STREAM_METADATA = "LLM_STREAM_METADATA", /** Logged by Agent Core upon receiving an END stream event. */ LLM_STREAM_END = "LLM_STREAM_END", /** Logged by Agent Core upon receiving an ERROR stream event. Content should be Error object or message. */ LLM_STREAM_ERROR = "LLM_STREAM_ERROR" } /** * Represents the different capabilities a model might possess. * Used for model selection and validation. * * @enum {string} */ declare enum ModelCapability { /** Basic text generation/understanding. */ TEXT = "text", /** Ability to process and understand images. */ VISION = "vision", /** Supports streaming responses chunk by chunk. */ STREAMING = "streaming", /** Capable of using tools/function calling. */ TOOL_USE = "tool_use", /** Built-in or optimized for Retrieval-Augmented Generation. */ RAG = "rag", /** Specialized in understanding or generating code. */ CODE = "code", /** Advanced reasoning, planning, complex instruction following. */ REASONING = "reasoning" } /** * Represents a recorded event during the agent's execution. * * @interface Observation */ interface Observation { /** * A unique identifier for this specific observation record. * @property {string} id */ id: string; /** * The identifier of the conversation thread this observation relates to. * @property {string} threadId */ threadId: string; /** * An optional identifier for tracing a request across multiple systems or components. * @property {string} [traceId] */ traceId?: string; /** * A Unix timestamp (in milliseconds) indicating when the observation was recorded. * @property {number} timestamp */ timestamp: number; /** * The category of the event being observed (e.g., PLAN, THOUGHTS, TOOL_EXECUTION). * @property {ObservationType} type */ type: ObservationType; /** * A concise, human-readable title summarizing the observation (often generated based on type/metadata). * @property {string} title */ title: string; /** * The main data payload of the observation, structure depends on the `type`. * * @remarks * Common content shapes by `type`: * - `TITLE`: `{ title: string }` — a concise thread title (<= 10 words) * - `INTENT`: `{ intent: string }` * - `PLAN`: `{ plan: string; rawOutput?: string }` * - `TOOL_CALL`: `{ toolCalls: ParsedToolCall[] }` * - `TOOL_EXECUTION`: `{ callId: string; toolName: string; status: 'success' | 'error'; output?: any; error?: string }` * - `FINAL_RESPONSE`: `{ message: ConversationMessage; uiMetadata?: object }` * @property {any} content */ content: any; /** * Optional metadata providing additional context (e.g., source phase, related IDs, status). * @property {Record<string, any>} [metadata] */ metadata?: Record<string, any>; } /** * Represents a single event emitted from an asynchronous LLM stream (`ReasoningEngine.call`). * * @remarks * Allows for real-time delivery of tokens, metadata, errors, and lifecycle signals. * Adapters are responsible for translating provider-specific stream chunks into these standard events. 
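// --- Illustrative data sketch (editorial addition) ---
// A TOOL_EXECUTION observation in the shape described above. Assumes Observation and
// ObservationType are exported from 'art-framework'; the IDs and payload values are
// hypothetical.
import { ObservationType } from 'art-framework';
import type { Observation } from 'art-framework';

const observation: Observation = {
  id: 'obs-001',
  threadId: 'thread-1',
  traceId: 'trace-abc',
  timestamp: Date.now(),
  type: ObservationType.TOOL_EXECUTION,
  title: 'Executed get_weather',
  content: {
    callId: 'call-1',
    toolName: 'get_weather',
    status: 'success',
    output: { tempC: 21 },
  },
  metadata: { phase: 'tool_execution' },
};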
* * @interface StreamEvent */ interface StreamEvent { /** * The type of the stream event. * - `TOKEN`: A chunk of text generated by the LLM. * - `METADATA`: Information about the LLM call (e.g., token counts, stop reason), typically sent once at the end. * - `ERROR`: An error occurred during the LLM call or stream processing. `data` will contain the Error object. * - `END`: Signals the successful completion of the stream. `data` is typically null. * @property {'TOKEN' | 'METADATA' | 'ERROR' | 'END'} type */ type: 'TOKEN' | 'METADATA' | 'ERROR' | 'END'; /** * The actual content of the event. * - For `TOKEN`: string (the text chunk). * - For `METADATA`: `LLMMetadata` object. * - For `ERROR`: `Error` object or error details. * - For `END`: null. * @property {any} data */ data: any; /** * Optional: Provides a more specific classification for `TOKEN` events, * combining LLM-level detection (thinking/response, if available from adapter) * and agent-level context (`callContext` from `CallOptions`). * Used by consumers (like UI) to differentiate between intermediate thoughts and the final response. * * - `LLM_THINKING`: Token identified by the adapter as part of the LLM's internal reasoning/thought process. * - `LLM_RESPONSE`: Token identified by the adapter as part of the LLM's final response content. * - `AGENT_THOUGHT_LLM_THINKING`: Token from an LLM call made in the 'AGENT_THOUGHT' context, identified as thinking. * - `AGENT_THOUGHT_LLM_RESPONSE`: Token from an LLM call made in the 'AGENT_THOUGHT' context, identified as response (e.g., the raw planning output). * - `FINAL_SYNTHESIS_LLM_THINKING`: Token from an LLM call made in the 'FINAL_SYNTHESIS' context, identified as thinking. * - `FINAL_SYNTHESIS_LLM_RESPONSE`: Token from an LLM call made in the 'FINAL_SYNTHESIS' context, identified as response (part of the final answer to the user). * * @remarks * Not all adapters can reliably distinguish 'LLM_THINKING' vs 'LLM_RESPONSE'. * Adapters should prioritize setting the agent context part (`AGENT_THOUGHT_...` or `FINAL_SYNTHESIS_...`) based on `CallOptions.callContext`. * If thinking detection is unavailable, adapters should default to `AGENT_THOUGHT_LLM_RESPONSE` or `FINAL_SYNTHESIS_LLM_RESPONSE`. * @property {'LLM_THINKING' | 'LLM_RESPONSE' | 'AGENT_THOUGHT_LLM_THINKING' | 'AGENT_THOUGHT_LLM_RESPONSE' | 'FINAL_SYNTHESIS_LLM_THINKING' | 'FINAL_SYNTHESIS_LLM_RESPONSE'} [tokenType] */ tokenType?: 'LLM_THINKING' | 'LLM_RESPONSE' | 'AGENT_THOUGHT_LLM_THINKING' | 'AGENT_THOUGHT_LLM_RESPONSE' | 'FINAL_SYNTHESIS_LLM_THINKING' | 'FINAL_SYNTHESIS_LLM_RESPONSE'; /** * The identifier of the conversation thread this event belongs to. * @property {string} threadId */ threadId: string; /** * The identifier tracing the specific agent execution cycle this event is part of. * @property {string} traceId */ traceId: string; /** * Optional identifier linking the event to a specific UI tab/window. * @property {string} [sessionId] */ sessionId?: string; } /** * Represents a basic JSON Schema definition, focusing on object types commonly used for tool inputs/outputs. * This is a simplified representation and doesn't cover all JSON Schema features. 
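// --- Illustrative handling sketch (editorial addition) ---
// Routing StreamEvents by type and tokenType, e.g. inside an LLMStreamSocket
// subscriber. Assumes the StreamEvent type is exported from 'art-framework'.
import type { StreamEvent } from 'art-framework';

let finalAnswer = '';

function handleStreamEvent(event: StreamEvent): void {
  switch (event.type) {
    case 'TOKEN':
      // Only surface final-synthesis response tokens to the user.
      if (event.tokenType === 'FINAL_SYNTHESIS_LLM_RESPONSE') finalAnswer += event.data;
      break;
    case 'METADATA':
      console.debug('LLM usage:', event.data); // LLMMetadata payload
      break;
    case 'ERROR':
      console.error('Stream error:', event.data);
      break;
    case 'END':
      console.log('Answer:', finalAnswer);
      break;
  }
}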
* * @interface JsonObjectSchema */ interface JsonObjectSchema { /** * @property {'object'} type */ type: 'object'; /** * @property {object} properties */ properties: { [key: string]: { type: string; description?: string; default?: any; items?: JsonObjectSchema | { type: string; }; properties?: JsonObjectSchema['properties']; required?: string[]; additionalProperties?: boolean | { type: string; }; [key: string]: any; }; }; required?: string[]; additionalProperties?: boolean; } type JsonSchema = JsonObjectSchema | { type: 'string' | 'number' | 'boolean' | 'array'; [key: string]: any; }; /** * Structure for holding metadata about an LLM call, typically received via a `METADATA` `StreamEvent` * or parsed from a non-streaming response. Fields are optional as availability varies by provider and stream state. * * @interface LLMMetadata */ interface LLMMetadata { /** * The number of tokens in the input prompt, if available. * @property {number} [inputTokens] */ inputTokens?: number; /** * The number of tokens generated in the output response, if available. * @property {number} [outputTokens] */ outputTokens?: number; /** * The number of tokens identified as part of the LLM's internal thinking process (if available from provider). * @property {number} [thinkingTokens] */ thinkingTokens?: number; /** * The time elapsed (in milliseconds) until the first token was generated in a streaming response, if applicable and available. * @property {number} [timeToFirstTokenMs] */ timeToFirstTokenMs?: number; /** * The total time elapsed (in milliseconds) for the entire generation process, if available. * @property {number} [totalGenerationTimeMs] */ totalGenerationTimeMs?: number; /** * The reason the LLM stopped generating tokens (e.g., 'stop_sequence', 'max_tokens', 'tool_calls'), if available. * @property {string} [stopReason] */ stopReason?: string; /** * Optional raw usage data provided directly by the LLM provider for extensibility (structure depends on provider). * @property {any} [providerRawUsage] */ providerRawUsage?: any; /** * The trace ID associated with the LLM call, useful for correlating metadata with the specific request. * @property {string} [traceId] */ traceId?: string; } /** * Defines the schema for a tool, including its input parameters. * Uses JSON Schema format for inputSchema. * * @interface ToolSchema */ interface ToolSchema { /** * A unique name identifying the tool (used in LLM prompts and registry lookups). Must be unique. * @property {string} name */ name: string; /** * A clear description of what the tool does, intended for the LLM to understand its purpose and usage. * @property {string} description */ description: string; /** * A JSON Schema object defining the structure, types, and requirements of the input arguments the tool expects. * @property {JsonSchema} inputSchema */ inputSchema: JsonSchema; /** * An optional JSON Schema object defining the expected structure of the data returned in the `output` field of a successful `ToolResult`. * @property {JsonSchema} [outputSchema] */ outputSchema?: JsonSchema; /** * Optional array of examples demonstrating how to use the tool, useful for few-shot prompting of the LLM. * @property {Array<{ input: any; output?: any; description?: string }>} [examples] */ examples?: Array<{ input: any; output?: any; description?: string; }>; } /** * Represents the structured result of a tool execution. 
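// --- Illustrative tool definition sketch (editorial addition) ---
// A ToolSchema with a JSON Schema inputSchema and one example for few-shot prompting.
// Assumes ToolSchema is exported from 'art-framework'; the calculator tool itself is
// hypothetical.
import type { ToolSchema } from 'art-framework';

const calculatorSchema: ToolSchema = {
  name: 'calculator',
  description: 'Evaluates a basic arithmetic expression and returns the result.',
  inputSchema: {
    type: 'object',
    properties: {
      expression: { type: 'string', description: 'Expression such as "2 + 2 * 3"' },
    },
    required: ['expression'],
  },
  outputSchema: {
    type: 'object',
    properties: { result: { type: 'number' } },
    required: ['result'],
  },
  examples: [{ input: { expression: '2 + 2' }, output: { result: 4 } }],
};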
* * @interface ToolResult */ interface ToolResult { /** * The unique identifier of the corresponding `ParsedToolCall` that initiated this execution attempt. * @property {string} callId */ callId: string; /** * The name of the tool that was executed. * @property {string} toolName */ toolName: string; /** * Indicates whether the tool execution succeeded or failed. * @property {'success' | 'error'} status */ status: 'success' | 'error'; /** * The data returned by the tool upon successful execution. Structure may be validated against `outputSchema`. * @property {any} [output] */ output?: any; /** * A descriptive error message if the execution failed (`status` is 'error'). * @property {string} [error] */ error?: string; /** * Optional metadata about the execution (e.g., duration, cost, logs). * @property {object} [metadata] */ metadata?: { sources?: Array<{ sourceName: string; url?: string; [key: string]: any; }>; [key: string]: any; }; } /** * Strategy for combining custom system prompt content across precedence levels. * * @typedef {'append' | 'prepend'} SystemPromptMergeStrategy */ type SystemPromptMergeStrategy = 'append' | 'prepend'; /** * Named preset for system prompts, supporting variables and a default merge strategy. * * @interface SystemPromptSpec */ interface SystemPromptSpec { /** * Optional explicit ID; when in a registry map, the key is typically the tag. * @property {string} [id] */ id?: string; /** * Template string. Supports simple {{variable}} placeholders and {{fragment:name}} for PromptManager fragments. * @property {string} template */ template: string; /** * Default variables applied if not provided at use time. * @property {Record<string, any>} [defaultVariables] */ defaultVariables?: Record<string, any>; /** * Default strategy to combine this spec with lower levels. Defaults to 'append'. * @property {SystemPromptMergeStrategy} [mergeStrategy] */ mergeStrategy?: SystemPromptMergeStrategy; } /** * Registry of available system prompt presets (tags) at the instance level. * * @interface SystemPromptsRegistry */ interface SystemPromptsRegistry { /** * Tag to use when no other tag is specified. * @property {string} [defaultTag] */ defaultTag?: string; /** * Mapping of tag -> spec. * @property {Record<string, SystemPromptSpec>} specs */ specs: Record<string, SystemPromptSpec>; } /** * Override provided at instance/thread/call level to select a tag and/or provide variables, * or to provide freeform content and a merge strategy. * * @interface SystemPromptOverride */ interface SystemPromptOverride { /** * Preset tag from the registry (e.g., 'default', 'legal_advisor'). * @property {string} [tag] */ tag?: string; /** * Variables to substitute in the selected template. * @property {Record<string, any>} [variables] */ variables?: Record<string, any>; /** * Freeform content to apply directly (escape hatch). * @property {string} [content] */ content?: string; /** * Merge behavior against previous level: append | prepend. * @property {SystemPromptMergeStrategy} [strategy] */ strategy?: SystemPromptMergeStrategy; } /** * Represents a parsed request from the LLM to call a specific tool. * * @interface ParsedToolCall */ interface ParsedToolCall { /** * A unique identifier generated by the OutputParser for this specific tool call request within a plan. * @property {string} callId */ callId: string; /** * The name of the tool the LLM intends to call. Must match a registered tool's schema name. 
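// --- Illustrative prompt-preset sketch (editorial addition) ---
// A SystemPromptsRegistry with a tagged preset, plus a thread-level override that
// selects the tag and fills its variables. Assumes both types are exported from
// 'art-framework'; the tag names and template text are hypothetical.
import type { SystemPromptsRegistry, SystemPromptOverride } from 'art-framework';

const systemPrompts: SystemPromptsRegistry = {
  defaultTag: 'default',
  specs: {
    default: { template: 'You are a helpful assistant.' },
    legal_advisor: {
      template: 'You are a cautious legal assistant for {{jurisdiction}}. {{fragment:disclaimer}}',
      defaultVariables: { jurisdiction: 'the EU' },
      mergeStrategy: 'append',
    },
  },
};

const threadOverride: SystemPromptOverride = {
  tag: 'legal_advisor',
  variables: { jurisdiction: 'Germany' },
  strategy: 'prepend',
};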
* @property {string} toolName */ toolName: string; /** * The arguments object, parsed from the LLM response, intended to be passed to the tool's `execute` method after validation. * @property {any} arguments */ arguments: any; } /** * Configuration specific to a conversation thread. * * @interface ThreadConfig */ interface ThreadConfig { /** * Default provider configuration for this thread. * @property {RuntimeProviderConfig} providerConfig */ providerConfig: RuntimeProviderConfig; /** * An array of tool names (matching `ToolSchema.name`) that are permitted for use within this thread. * @property {string[]} enabledTools */ enabledTools: string[]; /** * The maximum number of past messages (`ConversationMessage` objects) to retrieve for context. * @property {number} historyLimit */ historyLimit: number; /** * Optional system prompt override to be used for this thread, overriding instance or agent defaults. * @property {string | SystemPromptOverride} [systemPrompt] */ systemPrompt?: string | SystemPromptOverride; /** * Optional: Defines the identity and high-level guidance for the agent for this specific thread. * This overrides the instance-level persona. * @property {Partial<AgentPersona>} [persona] */ persona?: Partial<AgentPersona>; } /** * Represents non-configuration state associated with an agent or thread. * Could include user preferences, accumulated knowledge, etc. (Less defined for v1.0) * * @interface AgentState */ interface AgentState { /** * The primary data payload of the agent's state. Structure is application-defined. * @property {any} data */ data: any; /** * An optional version number for the agent's state, useful for migrations or tracking changes. * @property {number} [version] */ version?: number; /** * Allows for other arbitrary properties to be stored in the agent's state. * @property {any} [key: string] */ [key: string]: any; } /** * Encapsulates the configuration and state for a specific thread. * * @interface ThreadContext */ interface ThreadContext { /** * The configuration settings (`ThreadConfig`) currently active for the thread. * @property {ThreadConfig} config */ config: ThreadConfig; /** * The persistent state (`AgentState`) associated with the thread, or `null` if no state exists. * @property {AgentState | null} state */ state: AgentState | null; } /** * Properties required to in