@juspay/neurolink

Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more.

/**
 * NeuroLink - Unified AI Interface with Real MCP Tool Integration
 *
 * REDESIGNED FALLBACK CHAIN - NO CIRCULAR DEPENDENCIES
 * Enhanced AI provider system with natural MCP tool access.
 * Uses real MCP infrastructure for tool discovery and execution.
 */
import type { TextGenerationOptions, TextGenerationResult } from "./core/types.js";
import type { GenerateOptions, GenerateResult } from "./types/generateTypes.js";
import type { StreamOptions, StreamResult } from "./types/streamTypes.js";
import type { MCPServerInfo, MCPExecutableTool } from "./types/mcpTypes.js";
import type { JsonObject } from "./types/common.js";
import type { BatchOperationResult } from "./types/typeAliases.js";
import { EventEmitter } from "events";
import type { ConversationMemoryConfig, ChatMessage } from "./types/conversationTypes.js";
import type { ExternalMCPServerInstance, ExternalMCPOperationResult, ExternalMCPToolInfo } from "./types/externalMcp.js";
export interface ProviderStatus {
    provider: string;
    status: "working" | "failed" | "not-configured";
    configured: boolean;
    authenticated: boolean;
    error?: string;
    responseTime?: number;
    model?: string;
}
export interface MCPStatus {
    mcpInitialized: boolean;
    totalServers: number;
    availableServers: number;
    autoDiscoveredCount: number;
    totalTools: number;
    autoDiscoveredServers: MCPServerInfo[];
    customToolsCount: number;
    inMemoryServersCount: number;
    externalMCPServersCount?: number;
    externalMCPConnectedCount?: number;
    externalMCPFailedCount?: number;
    externalMCPServers?: MCPServerInfo[];
    error?: string;
    [key: string]: unknown;
}
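/*
 * Quick-start sketch. Assumes the package root re-exports the `NeuroLink` class
 * declared below and that at least one provider API key is configured in the
 * environment; the prompt text is illustrative.
 *
 * ```typescript
 * import { NeuroLink } from "@juspay/neurolink";
 *
 * const neurolink = new NeuroLink();
 * const result = await neurolink.generate("Summarize the Model Context Protocol in two sentences.");
 * console.log(result.content);
 * ```
 */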
export declare class NeuroLink {
    private mcpInitialized;
    private emitter;
    private autoDiscoveredServerInfos;
    private externalServerManager;
    private toolCircuitBreakers;
    private toolExecutionMetrics;
    /**
     * Helper method to emit tool end event in a consistent way
     * Used by executeTool in both success and error paths
     * @param toolName - Name of the tool
     * @param startTime - Timestamp when tool execution started
     * @param success - Whether the tool execution was successful
     * @param result - The result of the tool execution (optional)
     * @param error - The error if execution failed (optional)
     */
    private emitToolEndEvent;
    private conversationMemory?;
    /**
     * Creates a new NeuroLink instance for AI text generation with MCP tool integration.
     *
     * @param config - Optional configuration object
     * @param config.conversationMemory - Configuration for conversation memory features
     * @param config.conversationMemory.enabled - Whether to enable conversation memory (default: false)
     * @param config.conversationMemory.maxSessions - Maximum number of concurrent sessions (default: 100)
     * @param config.conversationMemory.maxTurnsPerSession - Maximum conversation turns per session (default: 50)
     *
     * @example
     * ```typescript
     * // Basic usage
     * const neurolink = new NeuroLink();
     *
     * // With conversation memory
     * const neurolink = new NeuroLink({
     *   conversationMemory: {
     *     enabled: true,
     *     maxSessions: 50,
     *     maxTurnsPerSession: 20
     *   }
     * });
     * ```
     *
     * @throws {Error} When provider registry setup fails
     * @throws {Error} When conversation memory initialization fails (if enabled)
     * @throws {Error} When external server manager initialization fails
     */
    constructor(config?: {
        conversationMemory?: Partial<ConversationMemoryConfig>;
    });
    /** Log constructor start with comprehensive environment analysis */
    private logConstructorStart;
    /** Initialize provider registry with security settings */
    private initializeProviderRegistry;
    /** Initialize conversation memory if enabled */
    private initializeConversationMemory;
    /** Initialize external server manager with event handlers */
    private initializeExternalServerManager;
    /** Setup event handlers for external server manager */
    private setupExternalServerEventHandlers;
    /** Log constructor completion with final state summary */
    private logConstructorComplete;
    /**
     * Initialize MCP registry with enhanced error handling and resource cleanup
     * Uses isolated async context to prevent hanging
     */
    private initializeMCP;
    /** Log MCP initialization start */
    private logMCPInitStart;
    /** Log MCP already initialized */
    private logMCPAlreadyInitialized;
    /** Import performance manager with error handling */
    private importPerformanceManager;
    /** Perform main MCP initialization logic */
    private performMCPInitialization;
    /** Initialize tool registry with timeout protection */
    private initializeToolRegistryInternal;
    /** Initialize provider registry */
    private initializeProviderRegistryInternal;
    /** Register direct tools server */
    private registerDirectToolsServerInternal;
    /** Load MCP configuration from .mcp-config.json */
    private loadMCPConfigurationInternal;
    /** Log MCP initialization completion */
    private logMCPInitComplete;
    /**
     * MAIN ENTRY POINT: Enhanced generate method with new function signature
     * Replaces both generateText and legacy methods
     */
    /**
     * Extracts the original prompt text from the provided input.
     * If a string is provided, it returns the string directly.
     * If a GenerateOptions object is provided, it returns the input text from the object.
     * @param optionsOrPrompt The prompt input, either as a string or a GenerateOptions object.
     * @returns The original prompt text as a string.
     */
    private _extractOriginalPrompt;
    /**
     * Generate AI content using the best available provider with MCP tool integration.
     * This is the primary method for text generation with full feature support.
     *
     * @param optionsOrPrompt - Either a string prompt or a comprehensive GenerateOptions object
     * @param optionsOrPrompt.input - Input configuration object
     * @param optionsOrPrompt.input.text - The text prompt to send to the AI (required)
     * @param optionsOrPrompt.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
     * @param optionsOrPrompt.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
     * @param optionsOrPrompt.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
     * @param optionsOrPrompt.maxTokens - Maximum tokens in response
     * @param optionsOrPrompt.systemPrompt - System message to set AI behavior
     * @param optionsOrPrompt.disableTools - Whether to disable MCP tool usage
     * @param optionsOrPrompt.enableAnalytics - Whether to include usage analytics
     * @param optionsOrPrompt.enableEvaluation - Whether to include response quality evaluation
     * @param optionsOrPrompt.context - Additional context for the request
     * @param optionsOrPrompt.evaluationDomain - Domain for specialized evaluation
     * @param optionsOrPrompt.toolUsageContext - Context for tool usage decisions
     *
     * @returns Promise resolving to GenerateResult with content, usage data, and optional analytics
     *
     * @example
     * ```typescript
     * // Simple usage with string prompt
     * const result = await neurolink.generate("What is artificial intelligence?");
     * console.log(result.content);
     *
     * // Advanced usage with options
     * const result = await neurolink.generate({
     *   input: { text: "Explain quantum computing" },
     *   provider: "openai",
     *   model: "gpt-4",
     *   temperature: 0.7,
     *   maxTokens: 500,
     *   enableAnalytics: true,
     *   enableEvaluation: true,
     *   context: { domain: "science", level: "intermediate" }
     * });
     *
     * // Access analytics and evaluation data
     * console.log(result.analytics?.usage);
     * console.log(result.evaluation?.relevance);
     * ```
     *
     * @throws {Error} When input text is missing or invalid
     * @throws {Error} When all providers fail to generate content
     * @throws {Error} When conversation memory operations fail (if enabled)
     */
    generate(optionsOrPrompt: GenerateOptions | string): Promise<GenerateResult>;
    /**
     * BACKWARD COMPATIBILITY: Legacy generateText method
     * Internally calls generate() and converts result format
     */
    generateText(options: TextGenerationOptions): Promise<TextGenerationResult>;
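    /*
     * Illustrative sketch of the legacy call style. The exact TextGenerationOptions
     * shape lives in ./core/types.js; the `prompt` and `provider` fields below are
     * assumptions used only for illustration.
     *
     * ```typescript
     * const legacy = await neurolink.generateText({
     *   prompt: "Write a haiku about TypeScript", // assumed field name
     *   provider: "openai"                        // assumed field name
     * });
     * console.log(legacy); // shape defined by TextGenerationResult
     * ```
     */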
    /**
     * REDESIGNED INTERNAL GENERATION - NO CIRCULAR DEPENDENCIES
     *
     * This method implements a clean fallback chain:
     * 1. Initialize conversation memory if enabled
     * 2. Inject conversation history into prompt
     * 3. Try MCP-enhanced generation if available
     * 4. Fall back to direct provider generation
     * 5. Store conversation turn for future context
     */
    private generateTextInternal;
    /** Log generateTextInternal start with comprehensive analysis */
    private logGenerateTextInternalStart;
    /** Emit generation start events */
    private emitGenerationStartEvents;
    /** Initialize conversation memory for generation */
    private initializeConversationMemoryForGeneration;
    /** Attempt MCP generation with retry logic */
    private attemptMCPGeneration;
    /** Perform MCP generation with retry logic */
    private performMCPGenerationRetries;
    /** Try MCP-enhanced generation (no fallback recursion) */
    private tryMCPGeneration;
    /** Direct provider generation (no MCP, no recursion) */
    private directProviderGeneration;
    /** Create tool-aware system prompt that informs AI about available tools */
    private createToolAwareSystemPrompt;
    /**
     * Execute tools if available through centralized registry
     * Simplified approach without domain detection - relies on tool registry
     */
    private detectAndExecuteTools;
    /** Enhance prompt with tool results (domain-agnostic) */
    private enhancePromptWithToolResults;
    /**
     * BACKWARD COMPATIBILITY: Legacy streamText method
     * Internally calls stream() and converts result format
     */
    streamText(prompt: string, options?: Partial<StreamOptions>): Promise<AsyncIterable<string>>;
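    /*
     * Illustrative sketch: unlike stream(), which resolves to a StreamResult object,
     * the legacy streamText helper resolves to a plain AsyncIterable<string>.
     * The prompt text is illustrative.
     *
     * ```typescript
     * const chunks = await neurolink.streamText("Describe the Model Context Protocol");
     * for await (const chunk of chunks) {
     *   process.stdout.write(chunk);
     * }
     * ```
     */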
    /**
     * Stream AI-generated content in real-time using the best available provider.
     * This method provides real-time streaming of AI responses with full MCP tool integration.
     *
     * @param options - Stream configuration options
     * @param options.input - Input configuration object
     * @param options.input.text - The text prompt to send to the AI (required)
     * @param options.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
     * @param options.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
     * @param options.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
     * @param options.maxTokens - Maximum tokens in response
     * @param options.systemPrompt - System message to set AI behavior
     * @param options.disableTools - Whether to disable MCP tool usage
     * @param options.enableAnalytics - Whether to include usage analytics
     * @param options.enableEvaluation - Whether to include response quality evaluation
     * @param options.context - Additional context for the request
     * @param options.evaluationDomain - Domain for specialized evaluation
     *
     * @returns Promise resolving to StreamResult with an async iterable stream
     *
     * @example
     * ```typescript
     * // Basic streaming usage
     * const result = await neurolink.stream({
     *   input: { text: "Tell me a story about space exploration" }
     * });
     *
     * // Consume the stream
     * for await (const chunk of result.stream) {
     *   process.stdout.write(chunk.content);
     * }
     *
     * // Advanced streaming with options
     * const result = await neurolink.stream({
     *   input: { text: "Explain machine learning" },
     *   provider: "openai",
     *   model: "gpt-4",
     *   temperature: 0.7,
     *   enableAnalytics: true,
     *   context: { domain: "education", audience: "beginners" }
     * });
     *
     * // Access metadata and analytics
     * console.log(result.provider);
     * console.log(result.analytics?.usage);
     * ```
     *
     * @throws {Error} When input text is missing or invalid
     * @throws {Error} When all providers fail to generate content
     * @throws {Error} When conversation memory operations fail (if enabled)
     */
    stream(options: StreamOptions): Promise<StreamResult>;
    /** Log stream entry point with comprehensive analysis */
    private logStreamEntryPoint;
    /** Log performance baseline */
    private logPerformanceBaseline;
    /** Validate stream input with comprehensive error reporting */
    private validateStreamInput;
    /** Emit stream start events */
    private emitStreamStartEvents;
    /** Create MCP stream */
    private createMCPStream;
    /** Process stream result */
    private processStreamResult;
    /** Emit stream end events */
    private emitStreamEndEvents;
    /** Create stream response */
    private createStreamResponse;
    /** Handle stream error with fallback */
    private handleStreamError;
    /**
     * Get the EventEmitter instance to listen to NeuroLink events for real-time monitoring and debugging.
     * This method provides access to the internal event system that emits events during AI generation,
     * tool execution, streaming, and other operations for comprehensive observability.
     *
     * @returns EventEmitter instance that emits various NeuroLink operation events
     *
     * @example
     * ```typescript
     * // Basic event listening setup
     * const neurolink = new NeuroLink();
     * const emitter = neurolink.getEventEmitter();
     *
     * // Listen to generation events
     * emitter.on('generation:start', (event) => {
     *   console.log(`Generation started with provider: ${event.provider}`);
     *   console.log(`Started at: ${new Date(event.timestamp)}`);
     * });
     *
     * emitter.on('generation:end', (event) => {
     *   console.log(`Generation completed in ${event.responseTime}ms`);
     *   console.log(`Tools used: ${event.toolsUsed?.length || 0}`);
     * });
     *
     * // Listen to streaming events
     * emitter.on('stream:start', (event) => {
     *   console.log(`Streaming started with provider: ${event.provider}`);
     * });
     *
     * emitter.on('stream:end', (event) => {
     *   console.log(`Streaming completed in ${event.responseTime}ms`);
     *   if (event.fallback) console.log('Used fallback streaming');
     * });
     *
     * // Listen to tool execution events
     * emitter.on('tool:start', (event) => {
     *   console.log(`Tool execution started: ${event.toolName}`);
     * });
     *
     * emitter.on('tool:end', (event) => {
     *   console.log(`Tool ${event.toolName} ${event.success ? 'succeeded' : 'failed'}`);
     *   console.log(`Execution time: ${event.responseTime}ms`);
     * });
     *
     * // Listen to tool registration events
     * emitter.on('tools-register:start', (event) => {
     *   console.log(`Registering tool: ${event.toolName}`);
     * });
     *
     * emitter.on('tools-register:end', (event) => {
     *   console.log(`Tool registration ${event.success ? 'succeeded' : 'failed'}: ${event.toolName}`);
     * });
     *
     * // Listen to external MCP server events
     * emitter.on('externalMCP:serverConnected', (event) => {
     *   console.log(`External MCP server connected: ${event.serverId}`);
     *   console.log(`Tools available: ${event.toolCount || 0}`);
     * });
     *
     * emitter.on('externalMCP:serverDisconnected', (event) => {
     *   console.log(`External MCP server disconnected: ${event.serverId}`);
     *   console.log(`Reason: ${event.reason || 'Unknown'}`);
     * });
     *
     * emitter.on('externalMCP:toolDiscovered', (event) => {
     *   console.log(`New tool discovered: ${event.toolName} from ${event.serverId}`);
     * });
     *
     * // Advanced usage with error handling
     * emitter.on('error', (error) => {
     *   console.error('NeuroLink error:', error);
     * });
     *
     * // Clean up event listeners when done
     * function cleanup() {
     *   emitter.removeAllListeners();
     * }
     *
     * process.on('SIGINT', cleanup);
     * process.on('SIGTERM', cleanup);
     * ```
     *
     * @example
     * ```typescript
     * // Advanced monitoring with metrics collection
     * const neurolink = new NeuroLink();
     * const emitter = neurolink.getEventEmitter();
     * const metrics = {
     *   generations: 0,
     *   totalResponseTime: 0,
     *   toolExecutions: 0,
     *   failures: 0
     * };
     *
     * // Collect performance metrics
     * emitter.on('generation:end', (event) => {
     *   metrics.generations++;
     *   metrics.totalResponseTime += event.responseTime;
     *   metrics.toolExecutions += event.toolsUsed?.length || 0;
     * });
     *
     * emitter.on('tool:end', (event) => {
     *   if (!event.success) {
     *     metrics.failures++;
     *   }
     * });
     *
     * // Log metrics every 10 seconds
     * setInterval(() => {
     *   const avgResponseTime = metrics.generations > 0
     *     ? metrics.totalResponseTime / metrics.generations
     *     : 0;
     *
     *   console.log('NeuroLink Metrics:', {
     *     totalGenerations: metrics.generations,
     *     averageResponseTime: `${avgResponseTime.toFixed(2)}ms`,
     *     totalToolExecutions: metrics.toolExecutions,
     *     failureRate: `${((metrics.failures / (metrics.toolExecutions || 1)) * 100).toFixed(2)}%`
     *   });
     * }, 10000);
     * ```
     *
     * **Available Events:**
     *
     * **Generation Events:**
     * - `generation:start` - Fired when text generation begins
     *   - `{ provider: string, timestamp: number }`
     * - `generation:end` - Fired when text generation completes
     *   - `{ provider: string, responseTime: number, toolsUsed?: string[], timestamp: number }`
     *
     * **Streaming Events:**
     * - `stream:start` - Fired when streaming begins
     *   - `{ provider: string, timestamp: number }`
     * - `stream:end` - Fired when streaming completes
     *   - `{ provider: string, responseTime: number, fallback?: boolean }`
     *
     * **Tool Events:**
     * - `tool:start` - Fired when tool execution begins
     *   - `{ toolName: string, timestamp: number }`
     * - `tool:end` - Fired when tool execution completes
     *   - `{ toolName: string, responseTime: number, success: boolean, timestamp: number }`
     * - `tools-register:start` - Fired when tool registration begins
     *   - `{ toolName: string, timestamp: number }`
     * - `tools-register:end` - Fired when tool registration completes
     *   - `{ toolName: string, success: boolean, timestamp: number }`
     *
     * **External MCP Events:**
     * - `externalMCP:serverConnected` - Fired when external MCP server connects
     *   - `{ serverId: string, toolCount?: number, timestamp: number }`
     * - `externalMCP:serverDisconnected` - Fired when external MCP server disconnects
     *   - `{ serverId: string, reason?: string, timestamp: number }`
     * - `externalMCP:serverFailed` - Fired when external MCP server fails
     *   - `{ serverId: string, error: string, timestamp: number }`
     * - `externalMCP:toolDiscovered` - Fired when external MCP tool is discovered
     *   - `{ toolName: string, serverId: string, timestamp: number }`
     * - `externalMCP:toolRemoved` - Fired when external MCP tool is removed
     *   - `{ toolName: string, serverId: string, timestamp: number }`
     * - `externalMCP:serverAdded` - Fired when external MCP server is added
     *   - `{ serverId: string, config: MCPServerInfo, toolCount: number, timestamp: number }`
     * - `externalMCP:serverRemoved` - Fired when external MCP server is removed
     *   - `{ serverId: string, timestamp: number }`
     *
     * **Error Events:**
     * - `error` - Fired when an error occurs
     *   - `{ error: Error, context?: object }`
     *
     * @throws {Error} This method does not throw errors as it returns the internal EventEmitter
     *
     * @since 1.0.0
     * @see {@link https://nodejs.org/api/events.html} Node.js EventEmitter documentation
     * @see {@link NeuroLink.generate} for events related to text generation
     * @see {@link NeuroLink.stream} for events related to streaming
     * @see {@link NeuroLink.executeTool} for events related to tool execution
     */
    getEventEmitter(): EventEmitter<[never]>;
    /**
     * Register a custom tool that will be available to all AI providers
     * @param name - Unique name for the tool
     * @param tool - Tool in MCPExecutableTool format (unified MCP protocol type)
     */
    registerTool(name: string, tool: MCPExecutableTool): void;
    /**
     * Register multiple tools at once - Supports both object and array formats
     * @param tools - Object mapping tool names to MCPExecutableTool format OR Array of tools with names
     *
     * Object format (existing): { toolName: MCPExecutableTool, ... }
     * Array format (Lighthouse compatible): [{ name: string, tool: MCPExecutableTool }, ...]
     */
    registerTools(tools: Record<string, MCPExecutableTool> | Array<{
        name: string;
        tool: MCPExecutableTool;
    }>): void;
    /**
     * Unregister a custom tool
     * @param name - Name of the tool to remove
     * @returns true if the tool was removed, false if it didn't exist
     */
    unregisterTool(name: string): boolean;
    /**
     * Get all registered custom tools
     * @returns Map of tool names to MCPExecutableTool format
     */
    getCustomTools(): Map<string, MCPExecutableTool>;
    /**
     * Add an in-memory MCP server (from git diff)
     * Allows registration of pre-instantiated server objects
     * @param serverId - Unique identifier for the server
     * @param serverInfo - Server configuration
     */
    addInMemoryMCPServer(serverId: string, serverInfo: MCPServerInfo): Promise<void>;
    /**
     * Get all registered in-memory servers
     * @returns Map of server IDs to MCPServerInfo
     */
    getInMemoryServers(): Map<string, MCPServerInfo>;
    /**
     * Get in-memory servers as MCPServerInfo - ZERO conversion needed
     * Now fetches from centralized tool registry instead of local duplication
     * @returns Array of MCPServerInfo
     */
    getInMemoryServerInfos(): MCPServerInfo[];
    /**
     * Get auto-discovered servers as MCPServerInfo - ZERO conversion needed
     * @returns Array of MCPServerInfo
     */
    getAutoDiscoveredServerInfos(): MCPServerInfo[];
    /**
     * Execute a specific tool by name with robust error handling
     * Supports both custom tools and MCP server tools with timeout, retry, and circuit breaker patterns
     * @param toolName - Name of the tool to execute
     * @param params - Parameters to pass to the tool
     * @param options - Execution options
     * @returns Tool execution result
     */
    executeTool<T = unknown>(toolName: string, params?: unknown, options?: {
        timeout?: number;
        maxRetries?: number;
        retryDelayMs?: number;
    }): Promise<T>;
    /** Internal tool execution method (extracted for better error handling) */
    private executeToolInternal;
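    /*
     * Illustrative sketch of custom tool registration and robust execution.
     * The MCPExecutableTool fields shown (description, inputSchema, execute) are
     * assumptions; see ./types/mcpTypes.js for the actual shape. Timeout/retry
     * options mirror the executeTool signature above.
     *
     * ```typescript
     * neurolink.registerTool("echo", {
     *   description: "Echoes its input back",                                       // assumed field
     *   inputSchema: { type: "object", properties: { text: { type: "string" } } },  // assumed field
     *   execute: async (params) => params                                           // assumed field
     * } as MCPExecutableTool);
     *
     * const result = await neurolink.executeTool("echo", { text: "hello" }, {
     *   timeout: 5000,
     *   maxRetries: 2,
     *   retryDelayMs: 250
     * });
     * console.log(result);
     * ```
     */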
    /**
     * Get all available tools including custom and in-memory ones
     * @returns Array of available tools with metadata
     */
    getAllAvailableTools(): Promise<{
        name: string;
        description: string;
        server: string;
        category?: string;
        inputSchema?: import("./types/typeAliases.js").StandardRecord;
    }[]>;
    /**
     * Get comprehensive status of all AI providers
     * Primary method for provider health checking and diagnostics
     */
    getProviderStatus(options?: {
        quiet?: boolean;
    }): Promise<ProviderStatus[]>;
    /**
     * Test a specific AI provider's connectivity and authentication
     * @param providerName - Name of the provider to test
     * @returns Promise resolving to true if provider is working
     */
    testProvider(providerName: string): Promise<boolean>;
    /** Internal method to test provider connection with minimal generation call */
    private testProviderConnection;
    /**
     * Get the best available AI provider based on configuration and availability
     * @param requestedProvider - Optional preferred provider name
     * @returns Promise resolving to the best provider name
     */
    getBestProvider(requestedProvider?: string): Promise<string>;
    /**
     * Get list of all available AI provider names
     * @returns Array of supported provider names
     */
    getAvailableProviders(): Promise<string[]>;
    /**
     * Validate if a provider name is supported
     * @param providerName - Provider name to validate
     * @returns True if provider name is valid
     */
    isValidProvider(providerName: string): Promise<boolean>;
    /**
     * Get comprehensive MCP (Model Context Protocol) status information
     * @returns Promise resolving to MCP status details
     */
    getMCPStatus(): Promise<MCPStatus>;
    /**
     * List all configured MCP servers with their status
     * @returns Promise resolving to array of MCP server information
     */
    listMCPServers(): Promise<MCPServerInfo[]>;
    /**
     * Test connectivity to a specific MCP server
     * @param serverId - ID of the MCP server to test
     * @returns Promise resolving to true if server is reachable
     */
    testMCPServer(serverId: string): Promise<boolean>;
    /**
     * Check if a provider has the required environment variables configured
     * @param providerName - Name of the provider to check
     * @returns Promise resolving to true if provider has required env vars
     */
    hasProviderEnvVars(providerName: string): Promise<boolean>;
    /**
     * Perform comprehensive health check on a specific provider
     * @param providerName - Name of the provider to check
     * @param options - Health check options
     * @returns Promise resolving to detailed health status
     */
    checkProviderHealth(providerName: string, options?: {
        timeout?: number;
        includeConnectivityTest?: boolean;
        includeModelValidation?: boolean;
        cacheResults?: boolean;
    }): Promise<{
        provider: string;
        isHealthy: boolean;
        isConfigured: boolean;
        hasApiKey: boolean;
        lastChecked: Date;
        error?: string;
        warning?: string;
        responseTime?: number;
        configurationIssues: string[];
        recommendations: string[];
    }>;
    /**
     * Check health of all supported providers
     * @param options - Health check options
     * @returns Promise resolving to array of health statuses for all providers
     */
    checkAllProvidersHealth(options?: {
        timeout?: number;
        includeConnectivityTest?: boolean;
        includeModelValidation?: boolean;
        cacheResults?: boolean;
    }): Promise<Array<{
        provider: string;
        isHealthy: boolean;
        isConfigured: boolean;
        hasApiKey: boolean;
        lastChecked: Date;
        error?: string;
        warning?: string;
        responseTime?: number;
        configurationIssues: string[];
        recommendations: string[];
    }>>;
    /**
     * Get a summary of provider health across all supported providers
     * @returns Promise resolving to health summary statistics
     */
    getProviderHealthSummary(): Promise<{
        total: number;
        healthy: number;
        configured: number;
        hasIssues: number;
        healthyProviders: string[];
        unhealthyProviders: string[];
        recommendations: string[];
    }>;
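    /*
     * Illustrative sketch of provider diagnostics using only the methods declared
     * above; the provider name "openai" is illustrative.
     *
     * ```typescript
     * const statuses = await neurolink.getProviderStatus({ quiet: true });
     * for (const s of statuses) {
     *   console.log(`${s.provider}: ${s.status} (configured: ${s.configured})`);
     * }
     *
     * const health = await neurolink.checkProviderHealth("openai", {
     *   timeout: 10000,
     *   includeConnectivityTest: true
     * });
     * if (!health.isHealthy) {
     *   console.warn(health.configurationIssues, health.recommendations);
     * }
     *
     * const summary = await neurolink.getProviderHealthSummary();
     * console.log(`${summary.healthy}/${summary.total} providers healthy`);
     * ```
     */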
    /**
     * Clear provider health cache (useful for re-testing after configuration changes)
     * @param providerName - Optional specific provider to clear cache for
     */
    clearProviderHealthCache(providerName?: string): Promise<void>;
    /**
     * Get execution metrics for all tools
     * @returns Object with execution metrics for each tool
     */
    getToolExecutionMetrics(): Record<string, {
        totalExecutions: number;
        successfulExecutions: number;
        failedExecutions: number;
        successRate: number;
        averageExecutionTime: number;
        lastExecutionTime: number;
    }>;
    /**
     * Get circuit breaker status for all tools
     * @returns Object with circuit breaker status for each tool
     */
    getToolCircuitBreakerStatus(): Record<string, {
        state: "closed" | "open" | "half-open";
        failureCount: number;
        isHealthy: boolean;
    }>;
    /**
     * Reset circuit breaker for a specific tool
     * @param toolName - Name of the tool to reset circuit breaker for
     */
    resetToolCircuitBreaker(toolName: string): void;
    /** Clear all tool execution metrics */
    clearToolExecutionMetrics(): void;
    /**
     * Get comprehensive tool health report
     * @returns Detailed health report for all tools
     */
    getToolHealthReport(): Promise<{
        totalTools: number;
        healthyTools: number;
        unhealthyTools: number;
        tools: Record<string, {
            name: string;
            isHealthy: boolean;
            metrics: {
                totalExecutions: number;
                successRate: number;
                averageExecutionTime: number;
                lastExecutionTime: number;
            };
            circuitBreaker: {
                state: "closed" | "open" | "half-open";
                failureCount: number;
            };
            issues: string[];
            recommendations: string[];
        }>;
    }>;
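    /*
     * Illustrative sketch of tool health monitoring using only the metrics and
     * circuit-breaker methods declared above; the tool name "echo" is illustrative.
     *
     * ```typescript
     * const metrics = neurolink.getToolExecutionMetrics();
     * const breakers = neurolink.getToolCircuitBreakerStatus();
     *
     * for (const [tool, m] of Object.entries(metrics)) {
     *   console.log(`${tool}: successRate=${m.successRate}, breaker=${breakers[tool]?.state}`);
     * }
     *
     * // Re-enable a tool whose circuit breaker opened after transient failures
     * if (breakers["echo"]?.state === "open") {
     *   neurolink.resetToolCircuitBreaker("echo");
     * }
     * ```
     */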
    /** Get conversation memory statistics (public API) */
    getConversationStats(): Promise<import("./types/conversationTypes.js").ConversationMemoryStats>;
    /**
     * Get complete conversation history for a specific session (public API)
     * @param sessionId - The session ID to retrieve history for
     * @returns Array of ChatMessage objects in chronological order, or empty array if session doesn't exist
     */
    getConversationHistory(sessionId: string): Promise<ChatMessage[]>;
    /** Clear conversation history for a specific session (public API) */
    clearConversationSession(sessionId: string): Promise<boolean>;
    /** Clear all conversation history (public API) */
    clearAllConversations(): Promise<void>;
    /**
     * Add an external MCP server
     * Automatically discovers and registers tools from the server
     * @param serverId - Unique identifier for the server
     * @param config - External MCP server configuration
     * @returns Operation result with server instance
     */
    addExternalMCPServer(serverId: string, config: MCPServerInfo): Promise<ExternalMCPOperationResult<ExternalMCPServerInstance>>;
    /**
     * Remove an external MCP server
     * Stops the server and removes all its tools
     * @param serverId - ID of the server to remove
     * @returns Operation result
     */
    removeExternalMCPServer(serverId: string): Promise<ExternalMCPOperationResult<void>>;
    /**
     * List all external MCP servers
     * @returns Array of server health information
     */
    listExternalMCPServers(): Array<{
        serverId: string;
        status: string;
        toolCount: number;
        uptime: number;
        isHealthy: boolean;
        config: MCPServerInfo;
    }>;
    /**
     * Get external MCP server status
     * @param serverId - ID of the server
     * @returns Server instance or undefined if not found
     */
    getExternalMCPServer(serverId: string): ExternalMCPServerInstance | undefined;
    /**
     * Execute a tool from an external MCP server
     * @param serverId - ID of the server
     * @param toolName - Name of the tool
     * @param parameters - Tool parameters
     * @param options - Execution options
     * @returns Tool execution result
     */
    executeExternalMCPTool(serverId: string, toolName: string, parameters: JsonObject, options?: {
        timeout?: number;
    }): Promise<unknown>;
    /**
     * Get all tools from external MCP servers
     * @returns Array of external tool information
     */
    getExternalMCPTools(): ExternalMCPToolInfo[];
    /**
     * Get tools from a specific external MCP server
     * @param serverId - ID of the server
     * @returns Array of tool information for the server
     */
    getExternalMCPServerTools(serverId: string): ExternalMCPToolInfo[];
    /**
     * Test connection to an external MCP server
     * @param config - Server configuration to test
     * @returns Test result with connection status
     */
    testExternalMCPConnection(config: MCPServerInfo): Promise<BatchOperationResult>;
    /**
     * Get external MCP server manager statistics
     * @returns Statistics about external servers and tools
     */
    getExternalMCPStatistics(): {
        totalServers: number;
        connectedServers: number;
        failedServers: number;
        totalTools: number;
        totalConnections: number;
        totalErrors: number;
    };
    /**
     * Shutdown all external MCP servers
     * Called automatically on process exit
     */
    shutdownExternalMCPServers(): Promise<void>;
    /**
     * Convert external MCP tools to Vercel AI SDK tool format
     * This allows AI providers to use external tools directly
     */
    private convertExternalMCPToolsToAISDKFormat;
    /**
     * Convert JSON Schema to AI SDK compatible format
     * For now, we'll skip schema validation and let the AI SDK handle parameters dynamically
     */
    private convertJSONSchemaToAISDKFormat;
    /** Unregister external MCP tools from a specific server */
    private unregisterExternalMCPToolsFromRegistry;
    /** Unregister a specific external MCP tool from the main registry */
    private unregisterExternalMCPToolFromRegistry;
    /** Unregister all external MCP tools from the main registry */
    private unregisterAllExternalMCPToolsFromRegistry;
}
export declare const neurolink: NeuroLink;
export default neurolink;
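/*
 * Illustrative sketch of external MCP server management. The MCPServerInfo config
 * fields shown (command, args) and the `success` flag on the operation result are
 * assumptions; see ./types/mcpTypes.js and ./types/externalMcp.js for the actual
 * shapes. Server, package, and tool names are illustrative.
 *
 * ```typescript
 * const added = await neurolink.addExternalMCPServer("filesystem", {
 *   command: "npx",                                                    // assumed field
 *   args: ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"]    // assumed field
 * } as MCPServerInfo);
 *
 * if (added.success) { // assumed result field
 *   for (const server of neurolink.listExternalMCPServers()) {
 *     console.log(`${server.serverId}: ${server.status}, ${server.toolCount} tools`);
 *   }
 *   const output = await neurolink.executeExternalMCPTool("filesystem", "list_directory", { path: "/tmp" });
 *   console.log(output);
 * }
 *
 * await neurolink.removeExternalMCPServer("filesystem");
 * ```
 */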