@juspay/neurolink
Version:
Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and
1,011 lines (1,010 loc) • 106 kB
JavaScript
/**
* NeuroLink - Unified AI Interface with Real MCP Tool Integration
*
* REDESIGNED FALLBACK CHAIN - NO CIRCULAR DEPENDENCIES
* Enhanced AI provider system with natural MCP tool access.
* Uses real MCP infrastructure for tool discovery and execution.
*/
// Load environment variables from .env file (critical for SDK usage)
import { config as dotenvConfig } from "dotenv";
try {
dotenvConfig(); // Load .env from current working directory
}
catch (error) {
// Environment variables should be set externally in production
}
import { AIProviderFactory } from "./core/factory.js";
import { mcpLogger } from "./utils/logger.js";
import { SYSTEM_LIMITS } from "./core/constants.js";
import pLimit from "p-limit";
import { toolRegistry } from "./mcp/toolRegistry.js";
import { logger } from "./utils/logger.js";
import { getBestProvider } from "./utils/providerUtils.js";
import { ProviderRegistry } from "./factories/providerRegistry.js";
import { createCustomToolServerInfo, detectCategory, } from "./utils/mcpDefaults.js";
// Factory processing imports
import { processFactoryOptions, enhanceTextGenerationOptions, validateFactoryConfig, processStreamingFactoryOptions, createCleanStreamOptions, } from "./utils/factoryProcessing.js";
// Transformation utilities
import { transformToolExecutions, transformToolExecutionsForMCP, transformAvailableTools, transformToolsForMCP, transformToolsToExpectedFormat, transformToolsToDescriptions, extractToolNames, transformParamsForLogging, optimizeToolForCollection, } from "./utils/transformationUtils.js";
// Enhanced error handling imports
import { ErrorFactory, NeuroLinkError, withTimeout, withRetry, isRetriableError, logStructuredError, CircuitBreaker, } from "./utils/errorHandling.js";
import { EventEmitter } from "events";
import { ConversationMemoryManager } from "./core/conversationMemoryManager.js";
import { applyConversationMemoryDefaults, getConversationMessages, storeConversationTurn, } from "./utils/conversationMemoryUtils.js";
import { ExternalServerManager } from "./mcp/externalServerManager.js";
// Import direct tools server for automatic registration
import { directToolsServer } from "./mcp/servers/agent/directToolsServer.js";
import { ContextManager } from "./context/ContextManager.js";
import { defaultContextConfig } from "./context/config.js";
import { isNonNullObject } from "./utils/typeUtils.js";
// Core types imported from core/types.js
export class NeuroLink {
// --- Instance state ---------------------------------------------------
// Set to true once initializeMCP() completes (even after partial failure,
// since initialization degrades gracefully); guards re-initialization.
mcpInitialized = false;
// Internal event bus surfacing generation/stream/tool/externalMCP
// lifecycle events to SDK consumers.
emitter = new EventEmitter();
// Created lazily by enableContextSummarization(); null means automatic
// context summarization is disabled.
contextManager = null;
autoDiscoveredServerInfos = [];
// External MCP server management
externalServerManager;
// Enhanced error handling support
toolCircuitBreakers = new Map();
toolExecutionMetrics = new Map();
/**
* Helper method to emit tool end event in a consistent way
* Used by executeTool in both success and error paths
* @param toolName - Name of the tool
* @param startTime - Timestamp when tool execution started
* @param success - Whether the tool execution was successful
*/
emitToolEndEvent(toolName, startTime, success) {
this.emitter.emit("tool:end", {
toolName,
responseTime: Date.now() - startTime,
success,
timestamp: Date.now(),
});
}
// Conversation memory support: holds a ConversationMemoryManager when the
// constructor received config.conversationMemory.enabled; otherwise stays
// undefined and memory-dependent paths are skipped.
conversationMemory;
constructor(config) {
// SDK always disables manual MCP config for security
ProviderRegistry.setOptions({
enableManualMCP: false,
});
// Initialize conversation memory if enabled
if (config?.conversationMemory?.enabled) {
const memoryConfig = applyConversationMemoryDefaults(config.conversationMemory);
this.conversationMemory = new ConversationMemoryManager(memoryConfig);
logger.info("NeuroLink initialized with conversation memory", {
maxSessions: memoryConfig.maxSessions,
maxTurnsPerSession: memoryConfig.maxTurnsPerSession,
});
}
// Initialize external server manager with main registry integration
this.externalServerManager = new ExternalServerManager({
maxServers: 20,
defaultTimeout: 15000,
enableAutoRestart: true,
enablePerformanceMonitoring: true,
}, {
enableMainRegistryIntegration: true, // Enable integration with main toolRegistry
});
// Forward external server events
this.externalServerManager.on("connected", (event) => {
this.emitter.emit("externalMCP:serverConnected", event);
});
this.externalServerManager.on("disconnected", (event) => {
this.emitter.emit("externalMCP:serverDisconnected", event);
});
this.externalServerManager.on("failed", (event) => {
this.emitter.emit("externalMCP:serverFailed", event);
});
this.externalServerManager.on("toolDiscovered", (event) => {
this.emitter.emit("externalMCP:toolDiscovered", event);
// Tools are already registered on server connection, no need to duplicate here
});
this.externalServerManager.on("toolRemoved", (event) => {
this.emitter.emit("externalMCP:toolRemoved", event);
// Unregister removed tools from main tool registry
this.unregisterExternalMCPToolFromRegistry(event.toolName);
});
}
/**
* Initialize MCP registry with enhanced error handling and resource cleanup
* Uses isolated async context to prevent hanging
*/
async initializeMCP() {
if (this.mcpInitialized) {
return;
}
// Track memory usage during MCP initialization
const { MemoryManager } = await import("./utils/performance.js");
const startMemory = MemoryManager.getMemoryUsageMB();
const initStartTime = Date.now();
try {
mcpLogger.debug("[NeuroLink] Starting isolated MCP initialization...");
// Initialize tool registry with timeout protection
const initTimeout = 3000; // 3 second timeout
await Promise.race([
Promise.resolve(), // toolRegistry doesn't need explicit initialization
new Promise((_, reject) => {
setTimeout(() => reject(new Error("MCP initialization timeout")), initTimeout);
}),
]);
// Register all providers with lazy loading support
await ProviderRegistry.registerAllProviders();
// Register the direct tools server to make websearch and other tools available
try {
// Use the server ID string for registration instead of the server object
await toolRegistry.registerServer("neurolink-direct", directToolsServer);
mcpLogger.debug("[NeuroLink] Direct tools server registered successfully", {
serverId: "neurolink-direct",
});
}
catch (error) {
mcpLogger.warn("[NeuroLink] Failed to register direct tools server", {
error: error instanceof Error ? error.message : String(error),
});
}
// Load MCP configuration from .mcp-config.json using ExternalServerManager
try {
const configResult = await this.externalServerManager.loadMCPConfiguration();
mcpLogger.debug("[NeuroLink] MCP configuration loaded successfully", {
serversLoaded: configResult.serversLoaded,
errors: configResult.errors.length,
});
if (configResult.errors.length > 0) {
mcpLogger.warn("[NeuroLink] Some MCP servers failed to load", {
errors: configResult.errors,
});
}
}
catch (configError) {
mcpLogger.warn("[NeuroLink] MCP configuration loading failed", {
error: configError instanceof Error
? configError.message
: String(configError),
});
}
this.mcpInitialized = true;
// Monitor memory usage and provide cleanup suggestions
const endMemory = MemoryManager.getMemoryUsageMB();
const memoryDelta = endMemory.heapUsed - startMemory.heapUsed;
const initTime = Date.now() - initStartTime;
mcpLogger.debug("[NeuroLink] MCP initialization completed successfully", {
initTime: `${initTime}ms`,
memoryUsed: `${memoryDelta}MB`,
});
// Suggest cleanup if initialization used significant memory
if (memoryDelta > 30) {
mcpLogger.debug("💡 Memory cleanup suggestion: MCP initialization used significant memory. Consider calling MemoryManager.forceGC() after heavy operations.");
}
}
catch (error) {
mcpLogger.warn("[NeuroLink] MCP initialization failed", {
error: error instanceof Error ? error.message : String(error),
});
// Continue without MCP - graceful degradation
}
}
/**
* MAIN ENTRY POINT: Enhanced generate method with new function signature
* Replaces both generateText and legacy methods
*/
/**
* Extracts the original prompt text from the provided input.
* If a string is provided, it returns the string directly.
* If a GenerateOptions object is provided, it returns the input text from the object.
* @param optionsOrPrompt The prompt input, either as a string or a GenerateOptions object.
* @returns The original prompt text as a string.
*/
_extractOriginalPrompt(optionsOrPrompt) {
return typeof optionsOrPrompt === "string"
? optionsOrPrompt
: optionsOrPrompt.input.text;
}
/**
* Enables automatic context summarization for the NeuroLink instance.
* Once enabled, the instance will maintain conversation history and
* automatically summarize it when it exceeds token limits.
* @param config Optional configuration to override default summarization settings.
*/
enableContextSummarization(config) {
const contextConfig = {
...defaultContextConfig,
...config,
};
// Pass the internal generator function directly, bound to the correct `this` context.
this.contextManager = new ContextManager(this.generateTextInternal.bind(this), contextConfig);
logger.info("[NeuroLink] Automatic context summarization enabled.");
}
/**
 * Primary generation entry point. Accepts either a raw prompt string or a
 * full GenerateOptions object, runs the fallback chain via
 * generateTextInternal(), and maps the internal text result onto the public
 * GenerateResult shape.
 *
 * Side effects: emits "generation:start"/"generation:end" on the internal
 * emitter; when context summarization is enabled, records both the user and
 * assistant turns after generation completes.
 * @param optionsOrPrompt Prompt string or GenerateOptions.
 * @returns GenerateResult with content, usage, tool and evaluation data.
 * @throws Error when input text is missing or not a string.
 */
async generate(optionsOrPrompt) {
  const originalPrompt = this._extractOriginalPrompt(optionsOrPrompt);
  // Convert string prompt to full options
  const options = typeof optionsOrPrompt === "string"
    ? { input: { text: optionsOrPrompt } }
    : optionsOrPrompt;
  // Validate prompt
  if (!options.input?.text || typeof options.input.text !== "string") {
    throw new Error("Input text is required and must be a non-empty string");
  }
  // Handle Context Management if enabled
  if (this.contextManager) {
    // Get the full context for the prompt without permanently adding the user's turn yet
    options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
  }
  const startTime = Date.now();
  // Emit generation start event
  this.emitter.emit("generation:start", {
    provider: options.provider || "auto",
    timestamp: startTime,
  });
  // Process factory configuration
  const factoryResult = processFactoryOptions(options);
  // Validate factory configuration if present
  if (factoryResult.hasFactoryConfig && options.factoryConfig) {
    const validation = validateFactoryConfig(options.factoryConfig);
    if (!validation.isValid) {
      logger.warn("Invalid factory configuration detected", {
        errors: validation.errors,
      });
      // Continue with warning rather than throwing - graceful degradation
    }
  }
  // Convert to TextGenerationOptions using factory utilities
  const baseOptions = {
    prompt: options.input.text,
    provider: options.provider,
    model: options.model,
    temperature: options.temperature,
    maxTokens: options.maxTokens,
    systemPrompt: options.systemPrompt,
    disableTools: options.disableTools,
    enableAnalytics: options.enableAnalytics,
    enableEvaluation: options.enableEvaluation,
    context: options.context,
    evaluationDomain: options.evaluationDomain,
    toolUsageContext: options.toolUsageContext,
  };
  // Apply factory enhancement using centralized utilities
  const textOptions = enhanceTextGenerationOptions(baseOptions, factoryResult);
  // Detect and execute domain-specific tools
  const { toolResults, enhancedPrompt } = await this.detectAndExecuteTools(textOptions.prompt || options.input.text, factoryResult.domainType);
  // Update prompt with tool results if available
  if (enhancedPrompt !== textOptions.prompt) {
    textOptions.prompt = enhancedPrompt;
    logger.debug("Enhanced prompt with tool results", {
      originalLength: options.input.text.length,
      enhancedLength: enhancedPrompt.length,
      toolResults: toolResults.length,
    });
  }
  // Use redesigned generation logic
  const textResult = await this.generateTextInternal(textOptions);
  // Emit generation completion event
  this.emitter.emit("generation:end", {
    provider: textResult.provider,
    responseTime: Date.now() - startTime,
    toolsUsed: textResult.toolsUsed,
    timestamp: Date.now(),
  });
  // Convert back to GenerateResult
  const generateResult = {
    content: textResult.content,
    provider: textResult.provider,
    model: textResult.model,
    // Map internal prompt/completion token names onto the public
    // input/output naming; default each count to 0.
    usage: textResult.usage
      ? {
        inputTokens: textResult.usage.promptTokens || 0,
        outputTokens: textResult.usage.completionTokens || 0,
        totalTokens: textResult.usage.totalTokens || 0,
      }
      : undefined,
    responseTime: textResult.responseTime,
    toolsUsed: textResult.toolsUsed,
    toolExecutions: transformToolExecutions(textResult.toolExecutions),
    enhancedWithTools: textResult.enhancedWithTools,
    availableTools: transformAvailableTools(textResult.availableTools),
    analytics: textResult.analytics,
    // Fill evaluation defaults so consumers always see a complete shape.
    evaluation: textResult.evaluation
      ? {
        ...textResult.evaluation,
        isOffTopic: textResult.evaluation
          .isOffTopic ?? false,
        alertSeverity: textResult.evaluation.alertSeverity ?? "none",
        reasoning: textResult.evaluation.reasoning ??
          "No evaluation provided",
        evaluationModel: textResult.evaluation
          .evaluationModel ?? "unknown",
        evaluationTime: textResult.evaluation
          .evaluationTime ?? Date.now(),
        // Include evaluationDomain from original options
        evaluationDomain: textResult.evaluation
          .evaluationDomain ??
          textOptions.evaluationDomain ??
          factoryResult.domainType,
      }
      : undefined,
  };
  // Add both the user's turn and the AI's response to the permanent history
  if (this.contextManager) {
    await this.contextManager.addTurn("user", originalPrompt);
    await this.contextManager.addTurn("assistant", generateResult.content);
  }
  return generateResult;
}
/**
* BACKWARD COMPATIBILITY: Legacy generateText method
* Internally calls generate() and converts result format
*/
async generateText(options) {
// Validate required parameters for backward compatibility
if (!options.prompt ||
typeof options.prompt !== "string" ||
options.prompt.trim() === "") {
throw new Error("GenerateText options must include prompt as a non-empty string");
}
// Use internal generation method directly
return await this.generateTextInternal(options);
}
/**
* REDESIGNED INTERNAL GENERATION - NO CIRCULAR DEPENDENCIES
*
* This method implements a clean fallback chain:
* 1. Initialize conversation memory if enabled
* 2. Inject conversation history into prompt
* 3. Try MCP-enhanced generation if available
* 4. Fall back to direct provider generation
* 5. Store conversation turn for future context
*/
async generateTextInternal(options) {
const startTime = Date.now();
const functionTag = "NeuroLink.generateTextInternal";
logger.debug(`[${functionTag}] Starting generation`, {
provider: options.provider || "auto",
promptLength: options.prompt?.length || 0,
hasConversationMemory: !!this.conversationMemory,
});
try {
// Initialize conversation memory if enabled
if (this.conversationMemory) {
await this.conversationMemory.initialize();
}
// Try MCP-enhanced generation first (if not explicitly disabled)
if (!options.disableTools) {
let mcpAttempts = 0;
const maxMcpRetries = 2; // Allow retries for tool-related failures
while (mcpAttempts <= maxMcpRetries) {
try {
logger.debug(`[${functionTag}] Attempting MCP generation (attempt ${mcpAttempts + 1}/${maxMcpRetries + 1})...`);
const mcpResult = await this.tryMCPGeneration(options);
if (mcpResult && mcpResult.content) {
logger.debug(`[${functionTag}] MCP generation successful on attempt ${mcpAttempts + 1}`, {
contentLength: mcpResult.content.length,
toolsUsed: mcpResult.toolsUsed?.length || 0,
toolExecutions: mcpResult.toolExecutions?.length || 0,
});
// Store conversation turn
await storeConversationTurn(this.conversationMemory, options, mcpResult);
return mcpResult;
}
else {
logger.debug(`[${functionTag}] MCP generation returned empty result on attempt ${mcpAttempts + 1}:`, {
hasResult: !!mcpResult,
hasContent: !!(mcpResult && mcpResult.content),
contentLength: mcpResult?.content?.length || 0,
toolExecutions: mcpResult?.toolExecutions?.length || 0,
});
// If we got a result but no content, and we have tool executions, this might be a tool success case
if (mcpResult &&
mcpResult.toolExecutions &&
mcpResult.toolExecutions.length > 0) {
logger.debug(`[${functionTag}] Found tool executions but no content, continuing with result`);
// Store conversation turn even with empty content if tools executed
await storeConversationTurn(this.conversationMemory, options, mcpResult);
return mcpResult;
}
}
}
catch (error) {
mcpAttempts++;
logger.debug(`[${functionTag}] MCP generation failed on attempt ${mcpAttempts}/${maxMcpRetries + 1}`, {
error: error instanceof Error ? error.message : String(error),
willRetry: mcpAttempts <= maxMcpRetries,
});
// If this was the last attempt, break and fall back
if (mcpAttempts > maxMcpRetries) {
logger.debug(`[${functionTag}] All MCP attempts exhausted, falling back to direct generation`);
break;
}
// Small delay before retry to allow transient issues to resolve
await new Promise((resolve) => setTimeout(resolve, 500));
}
mcpAttempts++;
}
}
// Fall back to direct provider generation
const directResult = await this.directProviderGeneration(options);
logger.debug(`[${functionTag}] Direct generation successful`);
// Store conversation turn
await storeConversationTurn(this.conversationMemory, options, directResult);
return directResult;
}
catch (error) {
logger.error(`[${functionTag}] All generation methods failed`, {
error: error instanceof Error ? error.message : String(error),
});
throw error;
}
}
/**
* Try MCP-enhanced generation (no fallback recursion)
*/
async tryMCPGeneration(options) {
const functionTag = "NeuroLink.tryMCPGeneration";
const startTime = Date.now();
try {
// Initialize MCP if needed
await this.initializeMCP();
if (!this.mcpInitialized) {
return null; // Skip MCP if not available
}
// Context creation removed - was never used
// Determine provider
const providerName = options.provider === "auto" || !options.provider
? await getBestProvider()
: options.provider;
// Get available tools
const availableTools = await this.getAllAvailableTools();
// Create tool-aware system prompt
const enhancedSystemPrompt = this.createToolAwareSystemPrompt(options.systemPrompt, availableTools);
// Get conversation messages for context
const conversationMessages = await getConversationMessages(this.conversationMemory, options);
// Create provider and generate
const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
this);
// Enable tool execution for the provider using BaseProvider method
provider.setupToolExecutor({
customTools: this.getCustomTools(),
executeTool: this.executeTool.bind(this),
}, functionTag);
const result = await provider.generate({
...options,
systemPrompt: enhancedSystemPrompt,
conversationMessages, // Inject conversation history
});
const responseTime = Date.now() - startTime;
// Enhanced result validation - consider tool executions as valid results
const hasContent = result && result.content && result.content.trim().length > 0;
const hasToolExecutions = result && result.toolExecutions && result.toolExecutions.length > 0;
// Log detailed result analysis for debugging
mcpLogger.debug(`[${functionTag}] Result validation:`, {
hasResult: !!result,
hasContent,
hasToolExecutions,
contentLength: result?.content?.length || 0,
toolExecutionsCount: result?.toolExecutions?.length || 0,
toolsUsedCount: result?.toolsUsed?.length || 0,
});
// Accept result if it has content OR successful tool executions
if (!hasContent && !hasToolExecutions) {
mcpLogger.debug(`[${functionTag}] Result rejected: no content and no tool executions`);
return null; // Let caller fall back to direct generation
}
// Transform tool executions with enhanced preservation
const transformedToolExecutions = transformToolExecutionsForMCP(result.toolExecutions);
// Log transformation results
mcpLogger.debug(`[${functionTag}] Tool execution transformation:`, {
originalCount: result?.toolExecutions?.length || 0,
transformedCount: transformedToolExecutions.length,
transformedTools: transformedToolExecutions.map((te) => te.toolName),
});
// Return enhanced result with preserved tool information
return {
content: result.content || "", // Ensure content is never undefined
provider: providerName,
usage: result.usage,
responseTime,
toolsUsed: result.toolsUsed || [],
toolExecutions: transformedToolExecutions,
enhancedWithTools: Boolean(hasToolExecutions), // Mark as enhanced if tools were actually used
availableTools: transformToolsForMCP(availableTools),
// Include analytics and evaluation from BaseProvider
analytics: result.analytics,
evaluation: result.evaluation,
};
}
catch (error) {
mcpLogger.warn(`[${functionTag}] MCP generation failed`, {
error: error instanceof Error ? error.message : String(error),
});
return null; // Let caller fall back
}
}
/**
* Direct provider generation (no MCP, no recursion)
*/
/**
 * Direct (non-MCP) generation with sequential provider fallback.
 *
 * When options.provider is "auto", tries each provider in the priority
 * list below until one succeeds; when a specific provider is requested,
 * only that provider is tried (no fallback).
 * @returns Result object with content/provider/model/usage/responseTime;
 *          enhancedWithTools is always false on this path.
 * @throws Error when every candidate provider fails (message carries the
 *         last provider error).
 */
async directProviderGeneration(options) {
  const startTime = Date.now();
  const functionTag = "NeuroLink.directProviderGeneration";
  // Define provider priority for fallback
  const providerPriority = [
    "openai",
    "vertex",
    "bedrock",
    "anthropic",
    "azure",
    "google-ai",
    "huggingface",
    "ollama",
  ];
  // "auto" means no specific provider was requested.
  const requestedProvider = options.provider === "auto" ? undefined : options.provider;
  // If specific provider requested, only use that provider (no fallback)
  const tryProviders = requestedProvider
    ? [requestedProvider]
    : providerPriority;
  logger.debug(`[${functionTag}] Starting direct generation`, {
    requestedProvider: requestedProvider || "auto",
    tryProviders,
    allowFallback: !requestedProvider,
  });
  let lastError = null;
  // Try each provider in order
  for (const providerName of tryProviders) {
    try {
      logger.debug(`[${functionTag}] Attempting provider: ${providerName}`);
      // Get conversation messages for context
      const conversationMessages = await getConversationMessages(this.conversationMemory, options);
      const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
      this);
      // Enable tool execution for direct provider generation using BaseProvider method
      provider.setupToolExecutor({
        customTools: this.getCustomTools(),
        executeTool: this.executeTool.bind(this),
      }, functionTag);
      const result = await provider.generate({
        ...options,
        conversationMessages, // Inject conversation history
      });
      const responseTime = Date.now() - startTime;
      if (!result) {
        throw new Error(`Provider ${providerName} returned null result`);
      }
      logger.debug(`[${functionTag}] Provider ${providerName} succeeded`, {
        responseTime,
        contentLength: result.content?.length || 0,
      });
      return {
        content: result.content || "",
        provider: providerName,
        model: result.model,
        usage: result.usage,
        responseTime,
        toolsUsed: result.toolsUsed || [],
        enhancedWithTools: false,
        analytics: result.analytics,
        evaluation: result.evaluation,
      };
    }
    catch (error) {
      lastError = error instanceof Error ? error : new Error(String(error));
      logger.warn(`[${functionTag}] Provider ${providerName} failed`, {
        error: lastError.message,
      });
      // Continue to next provider
    }
  }
  // All providers failed
  const responseTime = Date.now() - startTime;
  logger.error(`[${functionTag}] All providers failed`, {
    triedProviders: tryProviders,
    lastError: lastError?.message,
    responseTime,
  });
  throw new Error(`Failed to generate text with all providers. Last error: ${lastError?.message || "Unknown error"}`);
}
/**
* Create tool-aware system prompt that informs AI about available tools
*/
createToolAwareSystemPrompt(originalSystemPrompt, availableTools) {
if (availableTools.length === 0) {
return originalSystemPrompt || "";
}
const toolDescriptions = transformToolsToDescriptions(availableTools);
const toolPrompt = `\n\nYou have access to these additional tools if needed:\n${toolDescriptions}\n\nIMPORTANT: You are a general-purpose AI assistant. Answer all requests directly and creatively. These tools are optional helpers - use them only when they would genuinely improve your response. For creative tasks like storytelling, writing, or general conversation, respond naturally without requiring tools.`;
return (originalSystemPrompt || "") + toolPrompt;
}
/**
* Execute tools if available through centralized registry
* Simplified approach without domain detection - relies on tool registry
*/
async detectAndExecuteTools(prompt, domainType) {
const functionTag = "NeuroLink.detectAndExecuteTools";
try {
// Simplified: Just return original prompt without complex detection
// Tools will be available through normal MCP flow when AI decides to use them
logger.debug(`[${functionTag}] Skipping automatic tool execution - relying on centralized registry`);
return { toolResults: [], enhancedPrompt: prompt };
}
catch (error) {
logger.error(`[${functionTag}] Tool detection/execution failed`, {
error: error instanceof Error ? error.message : String(error),
});
return { toolResults: [], enhancedPrompt: prompt };
}
}
/**
* Enhance prompt with tool results (domain-agnostic)
*/
enhancePromptWithToolResults(prompt, toolResults) {
if (toolResults.length === 0) {
return prompt;
}
let enhancedPrompt = prompt;
for (const result of toolResults) {
if (result && typeof result === "object") {
enhancedPrompt += `\n\nTool Results:\n`;
// Handle structured result generically
try {
const resultStr = typeof result === "string"
? result
: JSON.stringify(result, null, 2);
enhancedPrompt += resultStr + "\n";
}
catch {
enhancedPrompt += "Tool execution completed\n";
}
}
}
return enhancedPrompt;
}
/**
* BACKWARD COMPATIBILITY: Legacy streamText method
* Internally calls stream() and converts result format
*/
async streamText(prompt, options) {
// Convert legacy format to new StreamOptions
const streamOptions = {
input: { text: prompt },
...options,
};
// Call the new stream method
const result = await this.stream(streamOptions);
// Convert StreamResult to simple string async iterable
async function* stringStream() {
for await (const chunk of result.stream) {
yield chunk.content;
}
}
return stringStream();
}
/**
* PRIMARY METHOD: Stream content using AI (recommended for new code)
* Future-ready for multi-modal capabilities with current text focus
*/
async stream(options) {
const startTime = Date.now();
const functionTag = "NeuroLink.stream";
// Validate input
if (!options?.input?.text ||
typeof options.input.text !== "string" ||
options.input.text.trim() === "") {
throw new Error("Stream options must include input.text as a non-empty string");
}
// Emit stream start event
this.emitter.emit("stream:start", {
provider: options.provider || "auto",
timestamp: startTime,
});
// Process factory configuration for streaming
const factoryResult = processFactoryOptions(options);
const streamingResult = processStreamingFactoryOptions(options);
// Validate factory configuration if present
if (factoryResult.hasFactoryConfig && options.factoryConfig) {
const validation = validateFactoryConfig(options.factoryConfig);
if (!validation.isValid) {
mcpLogger.warn("Invalid factory configuration detected in stream", {
errors: validation.errors,
});
// Continue with warning rather than throwing - graceful degradation
}
}
// Log factory processing results
if (factoryResult.hasFactoryConfig) {
mcpLogger.debug(`[${functionTag}] Factory configuration detected`, {
domainType: factoryResult.domainType,
enhancementType: factoryResult.enhancementType,
hasStreamingConfig: streamingResult.hasStreamingConfig,
});
}
// Initialize MCP if needed
await this.initializeMCP();
// Context creation removed - was never used
// Determine provider to use
const providerName = options.provider === "auto" || !options.provider
? await getBestProvider()
: options.provider;
// Prepare enhanced options for both success and fallback paths
let enhancedOptions = options;
if (factoryResult.hasFactoryConfig) {
enhancedOptions = {
...options,
// Merge contexts instead of overriding to preserve provider-required context
context: {
...(options.context || {}),
...(factoryResult.processedContext || {}),
},
// Ensure evaluation is enabled when using factory patterns
enableEvaluation: options.enableEvaluation ?? true,
// Use domain type for evaluation if available
evaluationDomain: factoryResult.domainType || options.evaluationDomain,
};
mcpLogger.debug(`[${functionTag}] Enhanced stream options with factory config`, {
domainType: factoryResult.domainType,
enhancementType: factoryResult.enhancementType,
hasProcessedContext: !!factoryResult.processedContext,
});
}
try {
mcpLogger.debug(`[${functionTag}] Starting MCP-enabled streaming`, {
provider: providerName,
prompt: (options.input.text?.substring(0, 100) || "No text") + "...",
});
// Initialize conversation memory if enabled (same as generate function)
if (this.conversationMemory) {
await this.conversationMemory.initialize();
}
// Get conversation messages for context injection (same as generate function)
const conversationMessages = await getConversationMessages(this.conversationMemory, {
prompt: options.input.text,
context: enhancedOptions.context,
});
// Create provider using the same factory pattern as generate
const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true, this);
// Enable tool execution for streaming using BaseProvider method
provider.setupToolExecutor({
customTools: this.getCustomTools(),
executeTool: this.executeTool.bind(this),
}, functionTag);
// Create clean options for provider (remove factoryConfig) and inject conversation history
const cleanOptions = createCleanStreamOptions(enhancedOptions);
const optionsWithHistory = {
...cleanOptions,
conversationMessages, // Inject conversation history like in generate function
};
// Call the provider's stream method with conversation history
const streamResult = await provider.stream(optionsWithHistory);
// Extract the stream from the result
const originalStream = streamResult.stream;
// Create a proper tee pattern that accumulates content and stores memory after consumption
let accumulatedContent = "";
const processedStream = (async function* (self) {
try {
for await (const chunk of originalStream) {
// Enhanced chunk validation and content handling
let processedChunk = chunk;
if (chunk && typeof chunk === "object") {
// Ensure chunk has content property and it's a string
if (typeof chunk.content === "string") {
accumulatedContent += chunk.content;
}
else if (chunk.content === undefined ||
chunk.content === null) {
// Handle undefined/null content gracefully - create a new chunk object
processedChunk = { ...chunk, content: "" };
}
else if (typeof chunk.content !== "string") {
// Convert non-string content to string - create a new chunk object
const stringContent = String(chunk.content || "");
processedChunk = { ...chunk, content: stringContent };
accumulatedContent += stringContent;
}
}
else if (chunk === null || chunk === undefined) {
// Create a safe empty chunk if chunk is null/undefined
processedChunk = { content: "" };
}
yield processedChunk; // Preserve original streaming behavior with safe content
}
}
finally {
// Store memory after stream consumption
if (self.conversationMemory) {
try {
await self.conversationMemory.storeConversationTurn(enhancedOptions.context
?.sessionId, enhancedOptions.context
?.userId, options.input.text, accumulatedContent);
logger.debug("Stream conversation turn stored", {
sessionId: enhancedOptions.context
?.sessionId,
userInputLength: options.input.text.length,
responseLength: accumulatedContent.length,
});
}
catch (error) {
logger.warn("Failed to store stream conversation turn", {
error: error instanceof Error ? error.message : String(error),
});
}
}
}
})(this);
const responseTime = Date.now() - startTime;
mcpLogger.debug(`[${functionTag}] MCP-enabled streaming completed`, {
responseTime,
provider: providerName,
});
// Emit stream completion event
this.emitter.emit("stream:end", {
provider: providerName,
responseTime,
});
// Convert to StreamResult format - Include analytics and evaluation from provider
return {
stream: processedStream,
provider: providerName,
model: options.model,
usage: streamResult.usage,
finishReason: streamResult.finishReason,
toolCalls: streamResult.toolCalls,
toolResults: streamResult.toolResults,
analytics: streamResult.analytics,
evaluation: streamResult.evaluation
? {
...streamResult.evaluation,
// Include evaluationDomain from factory configuration
evaluationDomain: streamResult.evaluation
?.evaluationDomain ??
enhancedOptions.evaluationDomain ??
factoryResult.domainType,
}
: undefined,
metadata: {
streamId: `neurolink-${Date.now()}`,
startTime,
responseTime,
},
};
}
catch (error) {
// Fall back to regular streaming if MCP fails
mcpLogger.warn(`[${functionTag}] MCP streaming failed, falling back to regular`, {
error: error instanceof Error ? error.message : String(error),
});
// Initialize conversation memory for fallback path (same as success path)
if (this.conversationMemory) {
await this.conversationMemory.initialize();
}
// Get conversation messages for fallback context injection
const fallbackConversationMessages = await getConversationMessages(this.conversationMemory, {
prompt: options.input.text,
context: enhancedOptions.context,
});
// Use factory to create provider without MCP
const provider = await AIProviderFactory.createBestProvider(providerName, options.model, false, // Disable MCP for fallback
this);
// Enable tool execution for fallback streaming using BaseProvider method
provider.setupToolExecutor({
customTools: this.getCustomTools(),
executeTool: this.executeTool.bind(this),
}, functionTag);
// Create clean options for fallback provider and inject conversation history
const cleanOptions = createCleanStreamOptions(enhancedOptions);
const fallbackOptionsWithHistory = {
...cleanOptions,
conversationMessages: fallbackConversationMessages, // Inject conversation history in fallback
};
const streamResult = await provider.stream(fallbackOptionsWithHistory);
// Create a proper tee pattern for fallback that accumulates content and stores memory after consumption
let fallbackAccumulatedContent = "";
const fallbackProcessedStream = (async function* (self) {
try {
for await (const chunk of streamResult.stream) {
if (chunk && typeof chunk.content === "string") {
fallbackAccumulatedContent += chunk.content;
}
yield chunk; // Preserve original streaming behavior
}
}
finally {
// Store memory after fallback stream consumption
if (self.conversationMemory) {
try {
await self.conversationMemory.storeConversationTurn(enhancedOptions.context
?.sessionId, enhancedOptions.context
?.userId, options.input.text, fallbackAccumulatedContent);
logger.debug("Fallback stream conversation turn stored", {
sessionId: enhancedOptions.context
?.sessionId,
userInputLength: options.input.text.length,
responseLength: fallbackAccumulatedContent.length,
});
}
catch (error) {
logger.warn("Failed to store fallback stream conversation turn", {
error: error instanceof Error ? error.message : String(error),
});
}
}
}
})(this);
const responseTime = Date.now() - startTime;
// Emit stream completion event for fallback
this.emitter.emit("stream:end", {
provider: providerName,
responseTime,
fallback: true,
});
return {
stream: fallbackProcessedStream,
provider: providerName,
model: options.model,
usage: streamResult.usage,
finishReason: streamResult.finishReason,
toolCalls: streamResult.toolCalls,
toolResults: streamResult.toolResults,
analytics: streamResult.analytics,
evaluation: streamResult.evaluation
? {
...streamResult.evaluation,
// Include evaluationDomain in fallback stream
evaluationDomain: streamResult.evaluation
?.evaluationDomain ??
enhancedOptions.evaluationDomain ??
factoryResult.domainType,
}
: undefined,
metadata: {
streamId: `neurolink-${Date.now()}`,
startTime,
responseTime,
fallback: true,
},
};
}
}
/**
* Get the EventEmitter to listen to NeuroLink events
* @returns EventEmitter instance
*/
getEventEmitter() {
return this.emitter;
}
// ========================================
// Tool Registration API
// ========================================
/**
* Register a custom tool that will be available to all AI providers
* @param name - Unique name for the tool
* @param tool - Tool in MCPExecutableTool format (unified MCP protocol type)
*/
registerTool(name, tool) {
// Emit tool registration start event
this.emitter.emit("tools-register:start", {
toolName: name,
timestamp: Date.now(),
});
try {
// --- Start: Added Validation Logic ---
if (!name || typeof name !== "string") {
throw new Error("Invalid tool name");
}
if (!tool || typeof tool !== "object") {
throw new Error(`Invalid tool object provided for tool: ${name}`);
}
if (typeof tool.execute !== "function") {
throw new Error(`Tool '${name}' must have an execute method.`);
}
// --- End: Added Validation Logic ---
// Import validation functions synchronously - they are pure functions
let validateTool;
let isToolNameAvailable;
let suggestToolNames;
try {
// Attempt a synchronous CommonJS require of the validation helpers
const toolRegistrationModule = require("./sdk/toolRegistration.js");
({ validateTool, isToolNameAvailable, suggestToolNames } =
toolRegistrationModule);
}
catch (error) {
// Fallback: skip validation if import fails (graceful degradation)
logger.warn("Tool validation module not available, skipping advanced validation", {