UNPKG

openagentic

A TypeScript framework for building AI agents with self-contained tool orchestration capabilities

817 lines (801 loc) 29.3 kB
import {
  A as AIModel, L as LogLevel, O as OrchestratorOptions, E as ExecutionResult, C as CoreMessage,
  a as OpenAgenticTool, B as BaseOrchestrator, M as Message, b as ApiKeyMap, c as OrchestratorType,
  P as PromptBasedOrchestrator, d as OrchestratorContext, e as CustomLogicOrchestrator
} from './index-Bbhf6zbC.mjs';
export {
  W as AIModelSchema, a0 as ExecutionResultSchema, Z as ExecutionStats, Y as LoggingConfig,
  X as MessageSchema, _ as StepInfo, T as ToolDescription, $ as ToolDetails, g as aiTools,
  i as allToolDescriptions, h as allTools, p as anthropicTool, q as cohereTool,
  f as convertLangchainTool, H as elevenlabsTool, n as geminiImageTool, r as geminiTool,
  J as geminiTtsTool, j as getToolDescription, k as getToolDescriptionsByCategory, v as githubTool,
  w as grokTool, S as groqTool, N as htmlComposerTool, K as inceptionLabsTool, x as llamaTool,
  U as lumaImageTool, y as mistralTool, z as newsdataTool, l as openaiImageTool, o as openaiTool,
  m as openaiVectorStoreTool, D as perplexityTool, F as qrcodeTool, s as searchToolDescriptions,
  R as slackPosterTool, V as toOpenAgenticTool, t as toolDescriptionsByCategory, Q as unsplashTool,
  u as utilityTools, I as videoGenerationTool, G as websearchTool
} from './index-Bbhf6zbC.mjs';
import { streamText } from 'ai';
import 'zod';

declare class Orchestrator {
  private model;
  private tools;
  private messages;
  private iterations;
  private maxIterations;
  private customLogic?;
  private orchestrator?;
  private orchestratorOptions;
  private loggingConfig;
  private executionStartTime;
  private stepTimings;
  private toolCallTimings;
  private stepsExecuted;
  private toolCallsExecuted;
  constructor(options: {
    model: string | AIModel;
    tools?: any[];
    systemPrompt?: string;
    maxIterations?: number;
    customLogic?: (input: string, context: any) => Promise<any>;
    enableDebugLogging?: boolean;
    logLevel?: LogLevel;
    enableStepLogging?: boolean;
    enableToolLogging?: boolean;
    enableTimingLogging?: boolean;
    enableStatisticsLogging?: boolean;
  } & OrchestratorOptions);
  execute(input: string): Promise<ExecutionResult>;
  execute(messages: CoreMessage[]): Promise<ExecutionResult>;
  private executeWithOrchestrator;
  private executeWithCustomLogicOrchestrator;
  private executeWithPromptBasedOrchestrator;
  private executeWithString;
  private executeWithMessages;
  addTool(tool: OpenAgenticTool): void;
  removeTool(toolName: string): void;
  getTool(toolName: string): any;
  getAllTools(): OpenAgenticTool[];
  switchModel(model: string | AIModel): void;
  getModelInfo(): any;
  getOrchestrator(): BaseOrchestrator | undefined;
  setOrchestrator(orchestrator: string | BaseOrchestrator | undefined): void;
  hasOrchestrator(): boolean;
  getMessages(): Message[];
  addMessage(message: Message): void;
  reset(): void;
  clear(): void;
  private createStepFinishCallback;
  private convertCoresToAISDK;
  private convertCoreToInternal;
  private convertToAISDKTools;
  private log;
  private sanitizeForLogging;
  private resetExecutionStats;
  private calculateExecutionStats;
  private executeWithCustomLogic;
}
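// --- Usage sketch (not part of the generated declarations) ---
// A minimal, hedged example of constructing an Orchestrator directly and running one prompt.
// The model id 'gpt-4o' and the tool choice are illustrative assumptions; any supported model
// string or AIModel configuration can be passed, and the provider is detected from the name.
//
// import { Orchestrator, websearchTool } from 'openagentic';
//
// const agent = new Orchestrator({
//   model: 'gpt-4o',                 // assumed model id
//   tools: [websearchTool],          // exported OpenAgenticTool instance
//   systemPrompt: 'You are a concise research assistant.',
//   maxIterations: 5,
// });
//
// const result = await agent.execute('Summarize the latest TypeScript release notes.');
// console.log(result);               // ExecutionResult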
declare class StreamingOrchestrator {
  private model;
  private tools;
  private messages;
  private maxIterations;
  private customLogic?;
  private orchestrator?;
  private orchestratorOptions;
  private loggingConfig;
  private executionStartTime;
  private stepTimings;
  private toolCallTimings;
  private stepsExecuted;
  private toolCallsExecuted;
  private chunksProcessed;
  private totalTextLength;
  private onFinishCallback?;
  constructor(options: {
    model: string | AIModel;
    tools?: any[];
    systemPrompt?: string;
    maxIterations?: number;
    customLogic?: (input: string, context: any) => Promise<any>;
    enableDebugLogging?: boolean;
    logLevel?: LogLevel;
    enableStepLogging?: boolean;
    enableToolLogging?: boolean;
    enableTimingLogging?: boolean;
    enableStatisticsLogging?: boolean;
    enableStreamingLogging?: boolean;
    onFinish?: (result: any) => void | Promise<void>;
  } & OrchestratorOptions);
  stream(input: string): Promise<ReturnType<typeof streamText>>;
  stream(messages: CoreMessage[]): Promise<ReturnType<typeof streamText>>;
  private streamWithCustomLogic;
  private streamWithOrchestrator;
  private streamWithCustomLogicOrchestrator;
  private streamWithPromptBasedOrchestrator;
  private streamWithString;
  private streamWithMessages;
  addTool(tool: OpenAgenticTool): void;
  removeTool(toolName: string): void;
  getTool(toolName: string): OpenAgenticTool | undefined;
  getAllTools(): OpenAgenticTool[];
  switchModel(model: string | AIModel): void;
  getModelInfo(): any;
  getOrchestrator(): BaseOrchestrator | undefined;
  setOrchestrator(orchestrator: string | BaseOrchestrator | undefined): void;
  hasOrchestrator(): boolean;
  getMessages(): Message[];
  addMessage(message: Message): void;
  reset(): void;
  clear(): void;
  private createStepFinishCallback;
  private createChunkCallback;
  private createErrorCallback;
  private convertCoresToAISDK;
  private convertCoreToInternal;
  private convertToAISDKTools;
  private transformMessages;
  private log;
  private sanitizeForLogging;
  private resetExecutionStats;
  private calculateExecutionStats;
}

declare class ProviderManager {
  private static userApiKeys;
  /** Set user-provided API keys that take precedence over environment variables */
  static setUserApiKeys(apiKeys: ApiKeyMap | undefined): void;
  /** Get AWS credentials from user API keys or environment variables */
  static getAwsCredentials(): {
    accessKeyId?: string;
    secretAccessKey?: string;
    region?: string;
    bucketName?: string;
  };
  /** Get AWS Bedrock credentials from user API keys or environment variables */
  static getBedrockCredentials(): {
    accessKeyId?: string;
    secretAccessKey?: string;
    region?: string;
  };
  /**
   * Create a model configuration from a string or AIModel object
   * Automatically detects provider from model name if string is provided
   */
  static createModel(input: string | AIModel): AIModel;
  /** Create an AI SDK provider instance for the given model */
  static createProvider(model: AIModel): Promise<any>;
  /** Create a provider for a specific provider name (for tool context) */
  static createProviderByName(providerName: string, apiKey?: string): Promise<any>;
  /** Get all available providers and their models */
  static getAllProviders(): Array<{ provider: string; models: string[]; }>;
  /** Get supported models for a provider */
  static getProviderModels(provider: string): string[];
  /** Check if a model is supported by a provider */
  static isModelSupported(provider: string, model: string): boolean;
  /** Get model information (context window, cost, description) */
  static getModelInfo(provider: string, model: string): never;
  private static autoDetectProvider;
  private static validateAndNormalizeModel;
  private static getDefaultApiKey;
}

/**
 * Register an orchestrator in the global registry
 * @param orchestrator - The orchestrator to register
 * @throws {Error} If an orchestrator with the same ID already exists
 */
declare function registerOrchestrator(orchestrator: BaseOrchestrator): void;
/**
 * Get an orchestrator by ID
 * @param id - The orchestrator ID
 * @returns The orchestrator instance or undefined if not found
 */
declare function getOrchestrator(id: string): BaseOrchestrator | undefined;
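// --- Usage sketch (not part of the generated declarations) ---
// Hedged example of streaming with StreamingOrchestrator. stream() returns the result of the
// AI SDK's streamText(), so iterating textStream assumes an AI SDK version that exposes it.
// The model id is an assumption.
//
// import { StreamingOrchestrator } from 'openagentic';
//
// const streamer = new StreamingOrchestrator({
//   model: 'claude-3-5-sonnet-20241022',                     // assumed model id
//   onFinish: async (result) => console.log('finished', result),
// });
//
// const stream = await streamer.stream('Write a haiku about type declarations.');
// for await (const chunk of stream.textStream) {
//   process.stdout.write(chunk);
// }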
/**
 * List all registered orchestrators
 * @returns Array of all registered orchestrators
 */
declare function listOrchestrators(): BaseOrchestrator[];
/**
 * Get orchestrators by type
 * @param type - The orchestrator type to filter by
 * @returns Array of orchestrators matching the type
 */
declare function getOrchestratorsByType(type: OrchestratorType): BaseOrchestrator[];
/**
 * Check if an orchestrator is registered
 * @param id - The orchestrator ID
 * @returns True if the orchestrator is registered
 */
declare function hasOrchestrator(id: string): boolean;
/**
 * Unregister an orchestrator
 * @param id - The orchestrator ID to remove
 * @returns True if the orchestrator was removed, false if it didn't exist
 */
declare function unregisterOrchestrator(id: string): boolean;
/** Clear all registered orchestrators */
declare function clearOrchestratorRegistry(): void;
/**
 * Get registry statistics
 * @returns Object with registry statistics
 */
declare function getRegistryStats(): {
  total: number;
  byType: Record<OrchestratorType, number>;
  orchestratorIds: string[];
};
/**
 * Resolve orchestrator from string ID or instance
 * @param orchestratorInput - String ID or orchestrator instance
 * @returns The orchestrator instance or undefined if not found
 */
declare function resolveOrchestrator(orchestratorInput: string | BaseOrchestrator | undefined): BaseOrchestrator | undefined;

/**
 * Base class for prompt-based orchestrators
 * Overrides system prompts and filters tools to create specialized agents
 */
declare class PromptBasedOrchestratorClass implements PromptBasedOrchestrator {
  readonly id: string;
  readonly name: string;
  readonly description: string;
  readonly systemPrompt: string;
  readonly allowedTools: string[];
  readonly type: "prompt-based";
  constructor(id: string, name: string, description: string, systemPrompt: string, allowedTools?: string[]);
  getName(): string;
  getDescription(): string;
  getType(): 'prompt-based';
  getSystemPrompt(): string;
  /** Override system prompt based on context (can be customized by subclasses) */
  buildSystemPrompt(context: OrchestratorContext): string;
  /** Filter tools based on allowed tools list */
  private filterTools;
  /** Validate input before execution */
  validate(input: string | CoreMessage[], context: OrchestratorContext): Promise<boolean>;
  /** Optional initialization */
  initialize(context: OrchestratorContext): Promise<void>;
  /** Optional cleanup */
  cleanup(context: OrchestratorContext): Promise<void>;
  /** Execute the orchestration by creating a specialized orchestrator instance */
  execute(input: string | CoreMessage[], context: OrchestratorContext): Promise<ExecutionResult>;
}
/** Helper function to create prompt-based orchestrators */
declare function createPromptBasedOrchestrator(id: string, name: string, description: string, systemPrompt: string, allowedTools?: string[]): PromptBasedOrchestratorClass;
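// --- Usage sketch (not part of the generated declarations) ---
// Hedged example of defining and registering a prompt-based orchestrator. The orchestrator id,
// system prompt, and the tool id in allowedTools are illustrative assumptions; allowedTools must
// match the ids of tools actually available to the agent.
//
// import { createPromptBasedOrchestrator, registerOrchestrator } from 'openagentic';
//
// const summarizer = createPromptBasedOrchestrator(
//   'doc-summarizer',                                        // assumed id
//   'Document Summarizer',
//   'Summarizes long documents into bullet points',
//   'You are a precise summarizer. Always answer with short bullet points.',
//   ['websearch'],                                           // assumed tool id
// );
//
// registerOrchestrator(summarizer);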
/**
 * Abstract base class for custom logic orchestrators
 * Provides common functionality and helper methods for implementing custom orchestration logic
 */
declare abstract class CustomLogicOrchestratorClass implements CustomLogicOrchestrator {
  readonly id: string;
  readonly name: string;
  readonly description: string;
  readonly type: "custom-logic";
  constructor(id: string, name: string, description: string);
  getName(): string;
  getDescription(): string;
  getType(): 'custom-logic';
  /**
   * Abstract method that subclasses must implement
   * This is where the custom orchestration logic goes
   */
  abstract customLogic(input: string | CoreMessage[], context: OrchestratorContext): Promise<any>;
  /**
   * Main execute method that calls customLogic and formats the result
   * Can be overridden by subclasses for more control
   */
  execute(input: string | CoreMessage[], context: OrchestratorContext): Promise<ExecutionResult>;
  /** Optional validation method */
  validate(input: string | CoreMessage[], context: OrchestratorContext): Promise<boolean>;
  /** Optional initialization method */
  initialize(context: OrchestratorContext): Promise<void>;
  /** Optional cleanup method */
  cleanup(context: OrchestratorContext): Promise<void>;
  /**
   * Optional method to determine if custom logic should be used
   * Default implementation always returns true
   */
  shouldUseCustomLogic(input: string | CoreMessage[], context: OrchestratorContext): boolean;
  /** Helper to get available tools filtered by ID */
  protected getToolsByIds(context: OrchestratorContext, toolIds: string[]): OpenAgenticTool[];
  /** Helper to execute a single tool */
  protected executeTool(tool: OpenAgenticTool, parameters: any, context?: any): Promise<any>;
  /** Helper to execute multiple tools in parallel */
  protected executeToolsInParallel(toolExecutions: Array<{
    tool: OpenAgenticTool;
    parameters: any;
    context?: any;
  }>): Promise<any[]>;
  /** Helper to execute tools sequentially */
  protected executeToolsInSequence(toolExecutions: Array<{
    tool: OpenAgenticTool;
    parameters: any;
    context?: any;
  }>): Promise<any[]>;
  /** Helper to call AI models directly (bypassing the orchestrator) */
  protected callAIModel(prompt: string, modelOverride?: string, context?: OrchestratorContext): Promise<string>;
  /** Helper to consolidate multiple results into a single response */
  protected consolidateResults(results: any[], consolidationPrompt?: string): string;
  /** Helper to get API keys for external services */
  protected getApiKey(provider: string): string | undefined;
  /** Helper to create structured output */
  protected createStructuredOutput(data: any): string;
  /** Helper to sanitize data for logging */
  private sanitizeForLogging;
}
/** Helper function to create custom logic orchestrators */
declare function createCustomLogicOrchestrator(id: string, name: string, description: string, customLogicImplementation: (input: string | CoreMessage[], context: OrchestratorContext) => Promise<any>): CustomLogicOrchestratorClass;

/** Result from parallel AI execution */
interface ParallelAIResult {
  modelId: string;
  provider: string;
  success: boolean;
  result?: string;
  error?: string;
  duration: number;
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}
/** Result from tool chain execution */
interface ToolChainResult {
  step: number;
  toolId: string;
  success: boolean;
  result?: any;
  error?: string;
  duration: number;
}
/** Analysis aggregation result */
interface AnalysisAggregation {
  primaryAnalysis: string;
  supportingAnalyses: string[];
  synthesis: string;
  confidence: number;
  sources: string[];
}
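// --- Usage sketch (not part of the generated declarations) ---
// Hedged example of building a custom-logic orchestrator with the helper function. The id and
// the shape of the returned value are assumptions; customLogic may return any value, which the
// base execute() wraps into an ExecutionResult.
//
// import { createCustomLogicOrchestrator, type CoreMessage, type OrchestratorContext } from 'openagentic';
//
// const echoOrchestrator = createCustomLogicOrchestrator(
//   'echo',                                                  // assumed id
//   'Echo Orchestrator',
//   'Returns the input unchanged; useful for wiring tests',
//   async (input: string | CoreMessage[], _context: OrchestratorContext) => {
//     const text = typeof input === 'string' ? input : JSON.stringify(input);
//     return { echoed: text };
//   },
// );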
/**
 * Advanced helper class for multi-AI orchestrators
 * Provides sophisticated patterns for complex AI workflows
 */
declare abstract class MultiAIOrchestrator extends CustomLogicOrchestratorClass {
  constructor(id: string, name: string, description: string);
  /**
   * Execute multiple AI models in parallel with the same prompt
   * @param prompt - The prompt to send to all models
   * @param models - Array of model identifiers or configurations
   * @param options - Execution options
   * @returns Array of results from all models
   */
  protected runInParallel(prompt: string, models: (string | AIModel)[], options?: {
    temperature?: number;
    maxTokens?: number;
    timeoutMs?: number;
    failFast?: boolean;
    retryCount?: number;
  }): Promise<ParallelAIResult[]>;
  /**
   * Consolidate results from parallel AI execution
   * @param results - Array of ParallelAIResult
   * @param strategy - Consolidation strategy
   * @returns Consolidated result
   */
  protected consolidateResults(results: ParallelAIResult[], strategy?: 'best' | 'consensus' | 'weighted' | 'all'): string;
  /**
   * Create a model instance with specific configuration
   * @param modelConfig - Model identifier or full configuration
   * @param overrides - Parameter overrides
   * @returns Configured AIModel
   */
  protected createModelInstance(modelConfig: string | AIModel, overrides?: {
    temperature?: number;
    maxTokens?: number;
    topP?: number;
  }): AIModel;
  /**
   * Execute a prompt with a specific model
   * @param prompt - The prompt to execute
   * @param model - Model configuration
   * @param options - Execution options
   * @returns Generation result
   */
  protected executeWithModel(prompt: string, model: AIModel, options?: {
    temperature?: number;
    maxTokens?: number;
    topP?: number;
  }): Promise<{
    text: string;
    usage?: {
      promptTokens: number;
      completionTokens: number;
      totalTokens: number;
    };
    finishReason?: string;
  }>;
  /**
   * Merge multiple analyses into a comprehensive report
   * @param analyses - Array of analysis results
   * @param prompt - Optional merging prompt
   * @returns Merged analysis
   */
  protected mergeAnalysis(analyses: Array<{
    source: string;
    content: string;
    confidence?: number;
    metadata?: any;
  }>, prompt?: string): Promise<AnalysisAggregation>;
  /**
   * Synthesize findings from multiple sources
   * @param findings - Array of findings with metadata
   * @param options - Synthesis options
   * @returns Synthesized findings
   */
  protected synthesizeFindings(findings: Array<{
    title: string;
    content: string;
    source: string;
    importance?: number;
    tags?: string[];
  }>, options?: {
    maxLength?: number;
    focusAreas?: string[];
    style?: 'technical' | 'executive' | 'detailed';
  }): Promise<string>;
  /**
   * Execute a chain of tools sequentially, passing results between them
   * @param toolChain - Array of tool configurations
   * @param context - Orchestrator context
   * @returns Array of tool chain results
   */
  protected executeToolChain(toolChain: Array<{
    tool: OpenAgenticTool;
    parameters: any | ((previousResults: ToolChainResult[]) => any);
    onSuccess?: (result: any, step: number) => void;
    onError?: (error: Error, step: number) => boolean;
  }>, context: OrchestratorContext): Promise<ToolChainResult[]>;
  /**
   * Upload result to S3 as HTML report
   * @param content - Content to upload
   * @param filename - Filename (without extension)
   * @param metadata - Additional metadata
   * @returns S3 URL
   */
  protected uploadResult(content: string, filename: string, metadata?: {
    title?: string;
    description?: string;
    author?: string;
    tags?: string[];
  }): Promise<string>;
  /**
   * Format content as a professional HTML report
   * @param content - Report content (supports Markdown)
   * @param options - Formatting options
   * @returns Formatted HTML
   */
  protected formatReport(content: string, options?: {
    title?: string;
    description?: string;
    author?: string;
    tags?: string[];
    style?: 'professional' | 'minimal' | 'detailed';
  }): string;
  /**
   * Handle partial failures in multi-AI operations
   * @param results - Array of results with success/failure status
   * @param options - Recovery options
   * @returns Recovery strategy result
   */
  protected handlePartialFailures<T extends {
    success: boolean;
    error?: string;
  }>(results: T[], options?: {
    minimumSuccessRate?: number;
    fallbackStrategy?: 'continue' | 'retry' | 'abort';
    maxRetries?: number;
  }): {
    shouldContinue: boolean;
    successfulResults: T[];
    failedResults: T[];
    recommendation: string;
  };
  /**
   * Sanitize sensitive data for logging
   * @param data - Data to sanitize
   * @returns Sanitized data
   */
  private sanitizeDataForLogging;
}
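// --- Usage sketch (not part of the generated declarations) ---
// Hedged example of subclassing MultiAIOrchestrator: fan one prompt out to several models and
// consolidate the answers. The id, model ids, and the 'consensus' strategy are illustrative;
// the base class handles wrapping the return value into an ExecutionResult.
//
// import { MultiAIOrchestrator, type CoreMessage, type OrchestratorContext } from 'openagentic';
//
// class SecondOpinionOrchestrator extends MultiAIOrchestrator {
//   constructor() {
//     super('second-opinion', 'Second Opinion', 'Asks several models and merges their answers'); // assumed id
//   }
//
//   async customLogic(input: string | CoreMessage[], _context: OrchestratorContext): Promise<any> {
//     const prompt = typeof input === 'string' ? input : JSON.stringify(input);
//     const results = await this.runInParallel(
//       prompt,
//       ['gpt-4o', 'claude-3-5-sonnet-20241022'],             // assumed model ids
//       { timeoutMs: 60_000, failFast: false },
//     );
//     return this.consolidateResults(results, 'consensus');
//   }
// }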
/**
 * Video Creator Orchestrator
 * Specialized orchestrator for creating professional videos using a 4-stage workflow
 */
declare const videoCreatorOrchestrator: PromptBasedOrchestratorClass;
/**
 * Code Assessment Orchestrator
 *
 * Implements a sophisticated workflow for comprehensive code analysis:
 * 1. Fetch code from GitHub repository
 * 2. Parallel analysis with Claude (code quality) and Gemini (technical depth)
 * 3. GPT-4o synthesizes findings into executive summary
 * 4. Generate comprehensive markdown report
 * 5. Upload to S3 as professional HTML report
 * 6. Generate PR diff and upload to S3 as text file (if requested)
 */
declare class CodeAssessmentOrchestrator extends MultiAIOrchestrator {
  constructor();
  /** Custom logic implementation for code assessment */
  customLogic(input: string | CoreMessage[], context: OrchestratorContext): Promise<any>;
  /** Parse repository information and additional directory paths from input and orchestratorParams */
  private parseRepositoryInfo;
  /** Fetch repository code using GitHub tool */
  private fetchRepositoryCode;
  /** Explore specific repository directories for source code */
  private exploreRepositoryStructure;
  /** Perform specialized parallel analysis with Claude and Gemini */
  private performParallelAnalysis;
  /** Prepare code context for analysis with less aggressive truncation */
  private prepareCodeContext;
  /** Synthesize findings using GPT-4o */
  private synthesizeAnalysisFindings;
  /** Parse GPT-4o synthesis response into structured sections */
  private parseSynthesisResponse;
  /** Create fallback synthesis if GPT-4o fails */
  private createFallbackSynthesis;
  /** Generate final comprehensive report */
  private generateFinalReport;
  /** Generate PR diff with suggested improvements using Anthropic with increased token limits */
  private generatePRDiff;
  /** Upload diff content to S3 as a text file */
  private uploadDiffToS3;
  /** Prepare enhanced code context for diff generation with more comprehensive content */
  private prepareEnhancedCodeContextForDiff;
}
declare const codeAssessmentOrchestrator: CodeAssessmentOrchestrator;
/**
 * Enhanced Image Generation Orchestrator
 * Specialized orchestrator that enhances image prompts using GPT-4o before generating images
 */
declare const enhancedImageGenerationOrchestrator: PromptBasedOrchestratorClass;
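// --- Usage sketch (not part of the generated declarations) ---
// Hedged example of pointing an agent at the built-in code assessment orchestrator. This assumes
// that custom-logic orchestrator instances satisfy BaseOrchestrator (as setOrchestrator expects)
// and that a plain-text repository reference is an input format parseRepositoryInfo accepts;
// both the model id and the input string below are assumptions.
//
// import { createAgent, codeAssessmentOrchestrator } from 'openagentic';
//
// const assessor = createAgent({ model: 'gpt-4o' });          // assumed model id
// assessor.setOrchestrator(codeAssessmentOrchestrator);
// const assessment = await assessor.execute('Assess the repository owner/repo'); // hypothetical input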
/**
 * News Specialist Orchestrator
 *
 * Uses an OpenAI agent with news analysis tools to gather information intelligently,
 * then generates professional HTML reports with visual content.
 *
 * Workflow:
 * 1. OpenAI agent uses news tools (perplexity, newsdata, websearch) intelligently
 * 2. AI-powered analysis and insight generation with visual content
 * 3. Professional HTML report generation for web publication
 */
declare class NewsSpecialistOrchestrator extends MultiAIOrchestrator {
  constructor();
  /** Custom logic implementation using OpenAI agent for news analysis */
  customLogic(input: string | CoreMessage[], context: OrchestratorContext): Promise<any>;
  /** Create OpenAI agent with news analysis tools */
  private createNewsAnalysisAgent;
  /** Run news analysis using OpenAI agent */
  private runNewsAnalysisAgent;
  /** Parse analysis parameters from user input */
  private parseAnalysisParameters;
  /** Generate visual content for the news article */
  private generateVisualContent;
  /** Create image prompts based on analysis content */
  private createImagePrompts;
  /** Extract keywords for image generation */
  private extractKeywords;
  /** Consolidate all findings into a structured report */
  private consolidateFindings;
  /** Generate professional HTML report using HTML Composer tool */
  private generateHtmlReport;
  /** Extract topic from content for title generation */
  private extractTopicFromContent;
  /** Extract a section from structured text */
  private extractSection;
  /** Extract list items from structured text */
  private extractListItems;
}
declare const newsSpecialistOrchestrator: NewsSpecialistOrchestrator;
/**
 * Flash Headlines Orchestrator
 *
 * Implements a fast workflow for generating top 10 news headlines with accompanying images:
 * 1. Use Gemini Flash to generate 10 structured headlines with titles and subtitles
 * 2. Use Gemini Image Generator to create relevant 500x500 photos for each headline
 * 3. Return structured array of {title, subtitle, imgSrc} objects ready for display
 */
declare class FlashHeadlinesOrchestrator extends MultiAIOrchestrator {
  constructor();
  /** Custom logic implementation for flash headlines generation */
  customLogic(input: string | CoreMessage[], context: OrchestratorContext): Promise<any>;
  /** Parse topic from user input, default to general news if no specific topic */
  private parseTopicFromInput;
  /** Generate 10 structured headlines using Gemini Flash */
  private generateHeadlines;
  /** Parse headlines from Gemini's JSON response */
  private parseHeadlinesFromResponse;
  /** Fallback method to extract headlines from unstructured text */
  private extractHeadlinesFromText;
  /** Generate 500x500 images for each headline using Gemini Image Generator */
  private generateHeadlineImages;
  /** Create descriptive prompt for generating news-related images */
  private createImagePrompt;
}
declare const flashHeadlinesOrchestrator: FlashHeadlinesOrchestrator;

interface OrchestratorInfo {
  id: string;
  name: string;
  description: string;
  type: 'prompt-based' | 'custom-logic' | 'multi-ai';
  systemPrompt?: string;
  allowedTools?: string[];
}
declare const orchestratorDescriptions: OrchestratorInfo[];
declare function loadBuiltInOrchestrators(): void;
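// --- Usage sketch (not part of the generated declarations) ---
// Hedged example of loading the built-in orchestrators and inspecting the registry. It assumes
// loadBuiltInOrchestrators() registers the orchestrators described by orchestratorDescriptions.
//
// import {
//   loadBuiltInOrchestrators,
//   listOrchestrators,
//   getRegistryStats,
//   orchestratorDescriptions,
// } from 'openagentic';
//
// loadBuiltInOrchestrators();
// console.log(getRegistryStats());                            // { total, byType, orchestratorIds }
// console.log(listOrchestrators().length, 'orchestrators registered');
// for (const info of orchestratorDescriptions) {
//   console.log(`${info.id} (${info.type}): ${info.description}`);
// }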
/** Create a standard agent for non-streaming execution */
declare function createAgent(options: {
  model: string | AIModel;
  tools?: any[];
  systemPrompt?: string;
  maxIterations?: number;
  customLogic?: (input: string, context: any) => Promise<any>;
  enableDebugLogging?: boolean;
  logLevel?: 'none' | 'basic' | 'detailed';
  enableStepLogging?: boolean;
  enableToolLogging?: boolean;
  enableTimingLogging?: boolean;
  enableStatisticsLogging?: boolean;
  enableStreamingLogging?: boolean;
  /**
   * API keys for various providers. Can include:
   * - Provider keys: openai, anthropic, google, etc.
   * - AWS credentials: awsAccessKeyId, awsSecretAccessKey, awsRegion, awsS3Bucket
   * - Bedrock credentials: bedrockAccessKeyId, bedrockSecretAccessKey, bedrockRegion
   */
  apiKeys?: ApiKeyMap;
} & OrchestratorOptions): Orchestrator;
/** Create a streaming agent for real-time response streaming */
declare function createStreamingAgent(options: {
  model: string | AIModel;
  tools?: any[];
  systemPrompt?: string;
  maxIterations?: number;
  enableDebugLogging?: boolean;
  logLevel?: 'none' | 'basic' | 'detailed';
  enableStepLogging?: boolean;
  enableToolLogging?: boolean;
  enableTimingLogging?: boolean;
  enableStatisticsLogging?: boolean;
  enableStreamingLogging?: boolean;
  onFinish?: (result: any) => void | Promise<void>;
  /**
   * API keys for various providers. Can include:
   * - Provider keys: openai, anthropic, google, etc.
   * - AWS credentials: awsAccessKeyId, awsSecretAccessKey, awsRegion, awsS3Bucket
   * - Bedrock credentials: bedrockAccessKeyId, bedrockSecretAccessKey, bedrockRegion
   */
  apiKeys?: ApiKeyMap;
} & OrchestratorOptions): StreamingOrchestrator;

export {
  AIModel, ApiKeyMap, BaseOrchestrator, CoreMessage, CustomLogicOrchestrator,
  CustomLogicOrchestratorClass, ExecutionResult, LogLevel, Message, MultiAIOrchestrator,
  OpenAgenticTool, Orchestrator, OrchestratorContext, type OrchestratorInfo, OrchestratorOptions,
  OrchestratorType, PromptBasedOrchestrator, PromptBasedOrchestratorClass, ProviderManager,
  StreamingOrchestrator, clearOrchestratorRegistry, codeAssessmentOrchestrator, createAgent,
  createCustomLogicOrchestrator, createPromptBasedOrchestrator, createStreamingAgent,
  enhancedImageGenerationOrchestrator, flashHeadlinesOrchestrator, getOrchestrator,
  getOrchestratorsByType, getRegistryStats, hasOrchestrator, listOrchestrators,
  loadBuiltInOrchestrators, newsSpecialistOrchestrator, orchestratorDescriptions,
  registerOrchestrator, resolveOrchestrator, unregisterOrchestrator, videoCreatorOrchestrator
};
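// --- Usage sketch (not part of the generated declarations) ---
// Hedged example of the two factory helpers. The model ids, tool selection, and environment
// variable name are assumptions for illustration; apiKeys keys follow the JSDoc above
// (provider keys plus optional AWS/Bedrock credentials).
//
// import { createAgent, createStreamingAgent, openaiTool, websearchTool } from 'openagentic';
//
// const agent = createAgent({
//   model: 'gpt-4o',                                          // assumed model id
//   tools: [openaiTool, websearchTool],
//   logLevel: 'basic',
//   apiKeys: { openai: process.env.OPENAI_API_KEY! },         // assumed env var name
// });
// const result = await agent.execute('What changed in the latest Node.js LTS?');
//
// const streamingAgent = createStreamingAgent({
//   model: 'gpt-4o',                                          // assumed model id
//   onFinish: (r) => console.log('done', r),
// });
// const stream = await streamingAgent.stream('Stream a short summary of the result above.');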