@gork-labs/secondbrain-mcp
Second Brain MCP Server - Agent team orchestration with dynamic tool discovery
985 lines (984 loc) • 101 kB
JavaScript
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { CallToolRequestSchema, ErrorCode, ListToolsRequestSchema, McpError, } from '@modelcontextprotocol/sdk/types.js';
import OpenAI from 'openai';
import { SessionManager } from './session-manager.js';
import { SubagentLoader } from '../subagents/loader.js';
import { QualityValidator, RefinementManager } from '../quality/index.js';
import { AnalyticsManager } from '../analytics/analytics-manager.js';
import { MLEngine } from '../ml/ml-engine.js';
import { SpawnAgentArgsSchema, ValidateOutputArgsSchema, SubAgentResponseSchema, SpawnAgentsParallelArgsSchema } from '../utils/types.js';
import { parseJsonForgiving, validateSubAgentStructure } from '../utils/json-parser.js';
import { ResponseParser } from '../utils/response-parser.js';
import { config, validateConfig } from '../utils/config.js';
import { logger } from '../utils/logger.js';
import { templateManager } from '../utils/template-manager.js';
import { getPackageInfo } from '../utils/version.js';
import { MCPClientManager } from '../tools/mcp-client-manager.js';
export class SecondBrainServer {
server; // underlying MCP Server instance
sessionManager; // session tracking, spawn limits, and task hashing
subagentLoader; // loads subagent (chatmode) definitions
qualityValidator; // scores sub-agent responses against quality rules
refinementManager; // tracks refinement attempts and builds refinement prompts
analyticsManager; // records validation, refinement, and timing events
mlEngine; // quality prediction and learning from validation results
mcpClientManager; // executes tools on downstream MCP servers for sub-agents
constructor() {
const packageInfo = getPackageInfo();
this.server = new Server({
name: packageInfo.name,
version: packageInfo.version,
}, {
capabilities: {
tools: {},
},
});
this.sessionManager = new SessionManager();
this.subagentLoader = new SubagentLoader();
this.qualityValidator = new QualityValidator();
this.refinementManager = new RefinementManager(this.sessionManager, this.qualityValidator);
this.analyticsManager = new AnalyticsManager();
this.mlEngine = new MLEngine(this.analyticsManager);
this.mcpClientManager = new MCPClientManager('subagent'); // Use subagent context to get filesystem tools
this.setupHandlers();
}
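/**
 * Register MCP request handlers: ListTools exposes only the native SecondBrain
 * tools, and CallTool dispatches each tool name to its handler method.
 */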
setupHandlers() {
// List available tools
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
// Only return SecondBrain native tools - MCP tools are for internal sub-agent use only
const nativeTools = [
{
name: 'spawn_agent',
description: 'Spawn a specialized sub-agent with domain expertise',
inputSchema: {
type: 'object',
properties: {
subagent: {
type: 'string',
description: 'The subagent type to spawn (e.g., "Security Engineer")',
},
task: {
type: 'string',
description: 'Detailed task description for the sub-agent',
},
context: {
type: 'string',
description: 'Relevant context and background information',
},
expected_deliverables: {
type: 'string',
description: 'What the sub-agent should produce',
},
},
required: ['subagent', 'task', 'context', 'expected_deliverables'],
},
},
{
name: 'spawn_agents_parallel',
description: 'Spawn multiple specialized sub-agents concurrently for parallel execution',
inputSchema: {
type: 'object',
properties: {
agents: {
type: 'array',
description: 'Array of agent specifications to spawn in parallel',
items: {
type: 'object',
properties: {
agent_id: {
type: 'string',
description: 'Unique identifier for this agent in the batch (for result correlation)',
},
subagent: {
type: 'string',
description: 'The subagent type to spawn (e.g., "Security Engineer")',
},
task: {
type: 'string',
description: 'Detailed task description for the sub-agent',
},
context: {
type: 'string',
description: 'Relevant context and background information',
},
expected_deliverables: {
type: 'string',
description: 'What the sub-agent should produce',
},
},
required: ['agent_id', 'subagent', 'task', 'context', 'expected_deliverables'],
},
minItems: 1,
maxItems: 5,
},
coordination_context: {
type: 'string',
description: 'Overall project context for coordinating between agents (optional)',
},
},
required: ['agents'],
},
},
{
name: 'list_subagents',
description: 'List all available chatmodes for sub-agent spawning',
inputSchema: {
type: 'object',
properties: {},
},
},
{
name: 'validate_output',
description: 'Validate and assess quality of sub-agent response with comprehensive quality control',
inputSchema: {
type: 'object',
properties: {
sub_agent_response: {
type: 'string',
description: 'JSON response from sub-agent to validate',
},
requirements: {
type: 'string',
description: 'Original task requirements',
},
quality_criteria: {
type: 'string',
description: 'Quality criteria for validation',
},
chatmode: {
type: 'string',
description: 'Chatmode used for the sub-agent (for chatmode-specific validation)',
},
session_id: {
type: 'string',
description: 'Session ID for refinement tracking (optional)',
},
enable_refinement: {
type: 'boolean',
description: 'Whether to generate refinement recommendations (default: true)',
},
},
required: ['sub_agent_response', 'requirements', 'quality_criteria'],
},
},
{
name: 'get_session_stats',
description: 'Get statistics for current session',
inputSchema: {
type: 'object',
properties: {
session_id: {
type: 'string',
description: 'Session ID to get stats for (optional)',
},
},
},
},
{
name: 'get_quality_analytics',
description: 'Get quality trends and insights for SecondBrain MCP system',
inputSchema: {
type: 'object',
properties: {
chatmode: {
type: 'string',
description: 'Filter by specific chatmode (optional)',
},
days: {
type: 'number',
description: 'Number of days to analyze (default: 7)',
},
},
},
},
{
name: 'get_performance_analytics',
description: 'Get performance metrics and optimization insights',
inputSchema: {
type: 'object',
properties: {
operation: {
type: 'string',
description: 'Specific operation to analyze (optional)',
},
days: {
type: 'number',
description: 'Number of days to analyze (default: 7)',
},
},
},
},
{
name: 'get_system_health',
description: 'Get current system health status and usage patterns',
inputSchema: {
type: 'object',
properties: {
include_patterns: {
type: 'boolean',
description: 'Include detailed usage patterns (default: true)',
},
days: {
type: 'number',
description: 'Number of days for pattern analysis (default: 7)',
},
},
},
},
{
name: 'generate_analytics_report',
description: 'Generate comprehensive analytics report for SecondBrain system',
inputSchema: {
type: 'object',
properties: {
days: {
type: 'number',
description: 'Number of days to include in report (default: 30)',
},
format: {
type: 'string',
enum: ['summary', 'detailed', 'executive'],
description: 'Report format level (default: detailed)',
},
},
},
},
{
name: 'predict_quality_score',
description: 'Predict quality score for a given context using machine learning',
inputSchema: {
type: 'object',
properties: {
chatmode: {
type: 'string',
description: 'The chatmode for prediction',
},
requirements: {
type: 'string',
description: 'Task requirements',
},
quality_criteria: {
type: 'string',
description: 'Quality criteria',
},
},
required: ['chatmode', 'requirements', 'quality_criteria'],
},
},
{
name: 'predict_refinement_success',
description: 'Predict likelihood of successful refinement',
inputSchema: {
type: 'object',
properties: {
current_score: {
type: 'number',
description: 'Current quality score (0-1)',
},
chatmode: {
type: 'string',
description: 'The chatmode being used',
},
requirements: {
type: 'string',
description: 'Task requirements',
},
quality_criteria: {
type: 'string',
description: 'Quality criteria',
},
refinement_attempt: {
type: 'number',
description: 'Current refinement attempt number',
},
},
required: ['current_score', 'chatmode', 'requirements', 'quality_criteria', 'refinement_attempt'],
},
},
{
name: 'get_ml_insights',
description: 'Get machine learning insights about system performance and patterns',
inputSchema: {
type: 'object',
properties: {},
},
},
{
name: 'get_optimization_suggestions',
description: 'Get ML-generated optimization suggestions for system improvement',
inputSchema: {
type: 'object',
properties: {},
},
},
{
name: 'get_mcp_server_stats',
description: 'Get statistics and status information about MCP servers managed by SecondBrain',
inputSchema: {
type: 'object',
properties: {
include_tools: {
type: 'boolean',
description: 'Include detailed tool information for each server (default: true)',
},
server_filter: {
type: 'string',
description: 'Filter by specific server name (optional)',
},
},
},
},
];
// Return only native SecondBrain tools - MCP tools are internal only
return {
tools: nativeTools,
};
});
// Handle tool calls
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
try {
switch (name) {
case 'spawn_agent':
return await this.handleSpawnAgent(args);
case 'spawn_agents_parallel':
return await this.handleSpawnAgentsParallel(args);
case 'list_subagents':
return await this.handleListSubagents();
case 'validate_output':
return await this.handleValidateOutput(args);
case 'get_session_stats':
return await this.handleGetSessionStats(args);
case 'get_quality_analytics':
return await this.handleGetQualityAnalytics(args);
case 'get_performance_analytics':
return await this.handleGetPerformanceAnalytics(args);
case 'get_system_health':
return await this.handleGetSystemHealth(args);
case 'generate_analytics_report':
return await this.handleGenerateAnalyticsReport(args);
case 'predict_quality_score':
return await this.handlePredictQualityScore(args);
case 'predict_refinement_success':
return await this.handlePredictRefinementSuccess(args);
case 'get_ml_insights':
return await this.handleGetMLInsights(args);
case 'get_optimization_suggestions':
return await this.handleGetOptimizationSuggestions(args);
case 'get_mcp_server_stats':
return await this.handleGetMCPServerStats(args);
default:
throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
}
}
catch (error) {
logger.error('Tool call failed', {
tool: name,
args,
error: error instanceof Error ? error.message : String(error)
});
if (error instanceof McpError) {
throw error;
}
throw new McpError(ErrorCode.InternalError, `Tool execution failed: ${error instanceof Error ? error.message : String(error)}`);
}
});
}
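/**
 * Spawn multiple specialized sub-agents concurrently. Creates a coordinator session,
 * enforces parallel-count and depth limits, runs every agent via Promise.all in its
 * own child session, and returns per-agent results plus an aggregate summary.
 */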
async handleSpawnAgentsParallel(args) {
const startTime = Date.now();
// Validate input arguments using Zod schema
const validatedArgs = SpawnAgentsParallelArgsSchema.parse(args);
const { agents: validatedAgents, coordination_context } = validatedArgs;
// Check parallel agent count limits
if (!this.sessionManager.canSpawnParallelAgents(validatedAgents.length)) {
throw new McpError(ErrorCode.InvalidParams, `Too many parallel agents requested (${validatedAgents.length}). Maximum allowed: ${config.maxParallelAgents}. Adjust SECONDBRAIN_MAX_PARALLEL_AGENTS environment variable to increase limit.`);
}
// Create coordinator session for tracking all parallel agents
const coordinatorSessionId = this.sessionManager.createSession(false);
const coordinatorLog = logger.withSession(coordinatorSessionId);
coordinatorLog.info('Starting parallel agent spawning', {
agentCount: validatedAgents.length,
agentIds: validatedAgents.map((a) => a.agent_id),
coordinationContext: coordination_context ? coordination_context.substring(0, 100) + '...' : 'none'
});
try {
// Check for duplicate agent IDs
const agentIds = validatedAgents.map((a) => a.agent_id);
const uniqueAgentIds = new Set(agentIds);
if (uniqueAgentIds.size !== agentIds.length) {
throw new McpError(ErrorCode.InvalidParams, 'Duplicate agent_id values found. Each agent must have a unique agent_id');
}
// Check coordinator session limits for total spawning capacity and depth
if (!this.sessionManager.canSpawnAgent(coordinatorSessionId)) {
const session = this.sessionManager.getSession(coordinatorSessionId);
const currentDepth = session?.currentDepth || 0;
throw new McpError(ErrorCode.InvalidRequest, `Cannot spawn parallel agents: Maximum depth exceeded (${currentDepth}/${config.maxDepth}). Adjust SECONDBRAIN_MAX_DEPTH environment variable to increase limit.`);
}
// Spawn all agents concurrently using Promise.all
coordinatorLog.info('Spawning agents in parallel', { agentCount: validatedAgents.length });
const spawnPromises = validatedAgents.map(async (agentSpec) => {
const agentStartTime = Date.now();
try {
// Validate each agent's task doesn't contain tool call mentions
const taskValidation = this.validateTaskForToolMentions(agentSpec.task, agentSpec.context || '');
if (!taskValidation.valid) {
throw new McpError(ErrorCode.InvalidRequest, `Invalid task description for agent ${agentSpec.agent_id}: ${taskValidation.error}`);
}
// Create individual session for this agent as sub-agent
const agentSessionId = this.sessionManager.createSession(true, coordinatorSessionId);
const agentLog = logger.withSession(agentSessionId);
agentLog.info('Spawning individual agent in parallel batch', {
agentId: agentSpec.agent_id,
subagent: agentSpec.subagent,
coordinatorSession: coordinatorSessionId
});
// Get subagent definition
const subagentDefinition = this.subagentLoader.getSubagent(agentSpec.subagent);
const taskHash = this.sessionManager.generateTaskHash(agentSpec.task, agentSpec.context, agentSpec.subagent);
// Track this agent call in both coordinator and individual sessions (NOT as refinements)
this.sessionManager.trackAgentCall(coordinatorSessionId, agentSpec.subagent, taskHash, false);
this.sessionManager.trackAgentCall(agentSessionId, agentSpec.subagent, taskHash, false);
// Execute real sub-agent
const response = await this.spawnRealAgent(subagentDefinition, agentSpec.task, agentSpec.context, agentSpec.expected_deliverables, agentSessionId);
const executionTime = Date.now() - agentStartTime;
agentLog.info('Parallel agent completed successfully', {
agentId: agentSpec.agent_id,
subagent: agentSpec.subagent,
executionTime,
status: response.metadata.task_completion_status
});
return {
agent_id: agentSpec.agent_id,
subagent: agentSpec.subagent,
status: 'success',
execution_time_ms: executionTime,
session_id: agentSessionId,
response: response
};
}
catch (error) {
const executionTime = Date.now() - agentStartTime;
coordinatorLog.error('Parallel agent failed', {
agentId: agentSpec.agent_id,
subagent: agentSpec.subagent,
executionTime,
error: error instanceof Error ? error.message : String(error)
});
return {
agent_id: agentSpec.agent_id,
subagent: agentSpec.subagent,
status: 'failed',
execution_time_ms: executionTime,
session_id: null,
error: error instanceof Error ? error.message : String(error)
};
}
});
// Wait for all agents to complete (or fail)
const results = await Promise.all(spawnPromises);
const totalExecutionTime = Date.now() - startTime;
// Analyze results
const successfulAgents = results.filter(r => r.status === 'success');
const failedAgents = results.filter(r => r.status === 'failed');
coordinatorLog.info('Parallel agent spawning completed', {
totalAgents: results.length,
successful: successfulAgents.length,
failed: failedAgents.length,
totalExecutionTime,
coordinatorSession: coordinatorSessionId
});
// Prepare response
const parallelResponse = {
parallel_execution: {
coordinator_session_id: coordinatorSessionId,
total_agents: results.length,
successful_agents: successfulAgents.length,
failed_agents: failedAgents.length,
total_execution_time_ms: totalExecutionTime,
coordination_context: coordination_context || null,
completed_at: new Date().toISOString()
},
agent_results: results.map(result => ({
agent_id: result.agent_id,
subagent: result.subagent,
status: result.status,
execution_time_ms: result.execution_time_ms,
session_id: result.session_id,
...(result.status === 'success' ? { response: result.response } : {}),
...(result.status === 'failed' ? { error: result.error } : {})
})),
summary: {
all_successful: failedAgents.length === 0,
partial_success: successfulAgents.length > 0 && failedAgents.length > 0,
total_failure: successfulAgents.length === 0,
fastest_agent: results.reduce((min, r) => r.execution_time_ms < min.execution_time_ms ? r : min, results[0]),
slowest_agent: results.reduce((max, r) => r.execution_time_ms > max.execution_time_ms ? r : max, results[0]),
average_execution_time_ms: Math.round(results.reduce((sum, r) => sum + r.execution_time_ms, 0) / results.length)
}
};
return {
content: [
{
type: 'text',
text: JSON.stringify(parallelResponse, null, 2),
},
],
};
}
catch (error) {
const totalExecutionTime = Date.now() - startTime;
coordinatorLog.error('Parallel agent spawning failed completely', {
error: error instanceof Error ? error.message : String(error),
totalExecutionTime,
agentCount: validatedAgents.length
});
throw error;
}
}
/**
* Validate task description to ensure it doesn't contain tool call mentions
* Sub-agents have different tool capabilities, so tool names shouldn't be specified in tasks
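* @example
* // Rejected: "Use the read_file tool to analyze code"
* // Accepted: "Analyze the code structure and identify issues"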
*/
validateTaskForToolMentions(task, context) {
const toolCallPatterns = [
// Direct tool mentions
/use\s+the\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+tool/gi,
/call\s+the\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+(tool|function)/gi,
/execute\s+the\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+(tool|function)/gi,
/run\s+the\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+tool/gi,
// Function-style mentions
/call\s+([a-zA-Z_][a-zA-Z0-9_]*)\(/gi,
/execute\s+([a-zA-Z_][a-zA-Z0-9_]*)\(/gi,
/use\s+([a-zA-Z_][a-zA-Z0-9_]*)\(/gi,
// Tool-specific language patterns
/use\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+to\s+(read|write|search|find|analyze)/gi,
/with\s+the\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+tool/gi,
/via\s+the\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+tool/gi,
// Common tool names that should never appear in tasks
/\b(read_file|write_file|list_dir|file_search|grep_search|git_diff|git_status|memory_search|spawn_agent|validate_output)\b/gi
];
const detectedPatterns = [];
const fullText = `${task} ${context}`;
for (const pattern of toolCallPatterns) {
const matches = fullText.match(pattern);
if (matches) {
detectedPatterns.push(...matches);
}
}
if (detectedPatterns.length > 0) {
return {
valid: false,
error: `Task description contains tool call mentions: ${detectedPatterns.join(', ')}. ` +
`Sub-agents have different tool capabilities. Instead of specifying tools, describe WHAT needs to be done, not HOW. ` +
`Example: Instead of "Use the read_file tool to analyze code", say "Analyze the code structure and identify issues".`,
detectedPatterns
};
}
return { valid: true };
}
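/**
 * Spawn a single specialized sub-agent. Validates the task wording, checks depth
 * limits, delegates to spawnRealAgent, and executes any tool the agent requests
 * through the MCP client manager; tool failures are returned as structured errors.
 */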
async handleSpawnAgent(args) {
const validatedArgs = SpawnAgentArgsSchema.parse(args);
const { subagent, task, context, expected_deliverables } = validatedArgs;
// Validate task doesn't contain tool call mentions
const taskValidation = this.validateTaskForToolMentions(task, context || '');
if (!taskValidation.valid) {
throw new McpError(ErrorCode.InvalidRequest, `Invalid task description: ${taskValidation.error}`);
}
// Create session for this operation (assuming primary agent context)
const sessionId = this.sessionManager.createSession(false);
const sessionLog = logger.withSession(sessionId);
try {
sessionLog.info('Spawning sub-agent', { subagent, task: task.substring(0, 100) + '...' });
// Check if we can spawn (loop protection and depth limits)
if (!this.sessionManager.canSpawnAgent(sessionId)) {
const session = this.sessionManager.getSession(sessionId);
const currentDepth = session?.currentDepth || 0;
throw new McpError(ErrorCode.InvalidRequest, `Cannot spawn agent: Maximum depth exceeded (${currentDepth}/${config.maxDepth}). Adjust SECONDBRAIN_MAX_DEPTH environment variable to increase limit.`);
}
// Get subagent definition
const subagentDefinition = this.subagentLoader.getSubagent(subagent);
const taskHash = this.sessionManager.generateTaskHash(task, context, subagent);
// Track this agent call (NOT as refinement)
this.sessionManager.trackAgentCall(sessionId, subagent, taskHash, false);
// Real sub-agent spawning (approved architecture v3.0.0):
// delegate to an actual specialist agent rather than simulating the response
const realResponse = await this.spawnRealAgent(subagentDefinition, task, context, expected_deliverables, sessionId);
sessionLog.info('Sub-agent completed task', {
subagent,
status: realResponse.metadata.task_completion_status,
confidence: realResponse.metadata.confidence_level
});
// Execute any tools the agent requested
let finalResult = realResponse;
if (realResponse.tool && realResponse.arguments) {
try {
sessionLog.info('Executing tool requested by agent', {
tool: realResponse.tool,
arguments: realResponse.arguments
});
const toolResult = await this.mcpClientManager.callTool(realResponse.tool, realResponse.arguments);
if (toolResult.success) {
// Return the actual tool execution results
const content = Array.isArray(toolResult.content)
? toolResult.content.map(item => item.text || (item.type === 'text' ? item.text : JSON.stringify(item))).join('\n')
: String(toolResult.content);
return {
content: [
{
type: 'text',
text: content,
},
],
};
}
else {
sessionLog.error('Tool execution failed', {
tool: realResponse.tool,
error: toolResult.error
});
// FIXED: Return proper error instead of silencing it
const errorResponse = {
error: {
type: 'tool_execution_failed',
tool_name: realResponse.tool,
message: toolResult.error || 'Tool execution failed',
details: `The tool '${realResponse.tool}' failed to execute. Error: ${toolResult.error || 'Unknown error'}`
},
metadata: {
session_id: sessionId,
subagent: subagent,
timestamp: new Date().toISOString()
}
};
return {
content: [
{
type: 'text',
text: JSON.stringify(errorResponse, null, 2),
},
],
};
}
}
catch (error) {
sessionLog.error('Failed to execute tool', {
tool: realResponse.tool,
error: error instanceof Error ? error.message : String(error)
});
// FIXED: Return proper error instead of silencing it
const errorResponse = {
error: {
type: 'tool_execution_exception',
tool_name: realResponse.tool,
message: error instanceof Error ? error.message : String(error),
details: `An exception occurred while executing tool '${realResponse.tool}': ${error instanceof Error ? error.message : String(error)}`
},
metadata: {
session_id: sessionId,
subagent: subagent,
timestamp: new Date().toISOString()
}
};
return {
content: [
{
type: 'text',
text: JSON.stringify(errorResponse, null, 2),
},
],
};
}
}
// Default: return the agent's response when no tool call was requested (failed tool calls return error payloads above)
return {
content: [
{
type: 'text',
text: JSON.stringify(finalResult, null, 2),
},
],
};
}
catch (error) {
sessionLog.error('Failed to spawn sub-agent', {
subagent,
error: error instanceof Error ? error.message : String(error)
});
throw error;
}
}
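/**
 * Return the list of available subagent definitions and their total count.
 */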
async handleListSubagents() {
const subagents = this.subagentLoader.getAllSubagentsInfo();
return {
content: [
{
type: 'text',
text: JSON.stringify({
available_subagents: subagents,
total_count: subagents.length
}, null, 2),
},
],
};
}
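/**
 * Validate a sub-agent response: parse it with the forgiving JSON parser, run the
 * enhanced quality validator, record analytics and ML learning, and optionally
 * attach refinement guidance when the quality threshold is not met.
 */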
async handleValidateOutput(args) {
const startTime = Date.now();
const operationId = `validation_${startTime}`;
// Start analytics operation tracking
this.analyticsManager.startTiming(operationId, 'quality_validation');
try {
// Parse and validate arguments
const validatedArgs = ValidateOutputArgsSchema.parse(args);
const { sub_agent_response, requirements, quality_criteria, subagent = 'default', session_id, enable_refinement = true } = validatedArgs;
// Use forgiving JSON parser for sub-agent response
const parseResult = parseJsonForgiving(sub_agent_response);
if (!parseResult.success) {
logger.error('Failed to parse sub-agent response with forgiving parser', {
error: parseResult.error,
fixesAttempted: parseResult.fixesApplied,
subagent,
session_id,
operationId
});
// End analytics operation as failed
this.analyticsManager.endTiming(operationId, 'quality_validation', false, 'json_parse_failure');
return this.createValidationErrorResponse(parseResult.error || 'JSON parsing failed', Date.now() - startTime, parseResult.fixesApplied || []);
}
logger.info('Successfully parsed sub-agent response', {
fixesApplied: parseResult.fixesApplied || [],
subagent,
session_id,
operationId
});
// Validate basic structure
const structureValidation = validateSubAgentStructure(parseResult.data);
if (!structureValidation.valid) {
logger.warn('Sub-agent response has structural issues', {
issues: structureValidation.issues,
subagent,
session_id,
operationId
});
}
// Try to validate with Zod schema, but be forgiving if it fails
let validatedResponse;
try {
validatedResponse = SubAgentResponseSchema.parse(parseResult.data);
}
catch (zodError) {
logger.warn('Zod validation failed, using forgiving validation', {
zodError: zodError instanceof Error ? zodError.message : String(zodError),
subagent,
session_id,
operationId
});
// Create a valid response structure from the parsed data
validatedResponse = this.createValidSubAgentResponse(parseResult.data, subagent);
}
// Create validation context
const context = {
subagent,
requirements,
qualityCriteria: quality_criteria,
sessionHistory: session_id ? this.sessionManager.getSession(session_id) || undefined : undefined
};
// Perform enhanced quality validation
const enhancedAssessment = await this.qualityValidator.validateResponse(validatedResponse, context);
// Record analytics event for this validation
this.analyticsManager.recordValidationEvent(enhancedAssessment, context, session_id, operationId);
// ML Learning: Let ML engine learn from this validation result
try {
await this.mlEngine.learnFromValidation(context, enhancedAssessment);
}
catch (mlError) {
// Log but don't fail validation if ML learning fails
logger.warn('ML learning failed for validation', {
error: mlError instanceof Error ? mlError.message : String(mlError),
subagent,
session_id,
operationId
});
}
// Check if refinement is needed and enabled
let refinementInfo = null;
if (enable_refinement && session_id) {
const needsRefinement = this.refinementManager.needsRefinement(enhancedAssessment, context, session_id);
if (needsRefinement) {
// Track refinement attempt
const refinementState = this.refinementManager.trackRefinementAttempt(session_id, subagent, enhancedAssessment.overallScore, 'Quality threshold not met');
// Record refinement event in analytics
this.analyticsManager.recordRefinementEvent(session_id, subagent, enhancedAssessment.overallScore, enhancedAssessment.overallScore + 0.1, // Simulate potential improvement
false // Not yet successful, just recording the attempt
);
// Generate refinement prompt
const refinementPrompt = this.refinementManager.generateRefinementPrompt(enhancedAssessment, context, requirements, validatedResponse);
refinementInfo = {
needs_refinement: true,
refinement_prompt: refinementPrompt,
attempt_number: refinementState.attemptNumber,
quality_trend: refinementState.qualityTrend,
max_attempts: 3 // Could be made configurable
};
}
else {
refinementInfo = {
needs_refinement: false,
reason: enhancedAssessment.passed ? 'Quality threshold met' : 'Maximum refinement attempts reached or refinement unlikely to help'
};
}
}
// Create comprehensive validation result
const validationResult = {
// Enhanced quality assessment
enhanced_quality: {
overall_score: enhancedAssessment.overallScore,
quality_threshold: enhancedAssessment.qualityThreshold,
passed: enhancedAssessment.passed,
confidence: enhancedAssessment.confidence,
processing_time_ms: enhancedAssessment.processingTime,
can_refine: enhancedAssessment.canRefine
},
// Detailed quality breakdown
quality_details: {
category_scores: enhancedAssessment.categories,
rule_results: enhancedAssessment.ruleResults.map(result => ({
category: result.category,
passed: result.passed,
score: result.score,
severity: result.severity,
feedback: result.feedback
})),
critical_issues: enhancedAssessment.criticalIssues,
recommendations: enhancedAssessment.recommendations,
refinement_suggestions: enhancedAssessment.refinementSuggestions
},
// Legacy compatibility (simplified format)
legacy_quality: this.createLegacyAssessment(enhancedAssessment),
// Format validation details
format_validation: {
valid: true,
deliverables_present: Object.keys(validatedResponse.deliverables).length > 0,
memory_operations_count: validatedResponse.memory_operations?.length || 0,
metadata_complete: validatedResponse.metadata &&
validatedResponse.metadata.task_completion_status &&
validatedResponse.metadata.confidence_level
},
// Refinement information (if enabled)
...(refinementInfo && { refinement: refinementInfo }),
// Processing metadata
validation_metadata: {
validation_time_ms: Date.now() - startTime,
chatmode_used: subagent,
session_id: session_id || null,
validator_version: '3.0',
rules_applied: enhancedAssessment.ruleResults.length
}
};
logger.info('Enhanced quality validation completed', {
subagent,
session_id,
overall_score: enhancedAssessment.overallScore,
passed: enhancedAssessment.passed,
needs_refinement: refinementInfo?.needs_refinement || false,
processing_time: Date.now() - startTime
});
return {
content: [
{
type: 'text',
text: JSON.stringify(validationResult, null, 2),
},
],
};
}
catch (parseError) {
logger.error('Quality validation failed during parsing', {
error: parseError instanceof Error ? parseError.message : String(parseError),
processing_time: Date.now() - startTime
});
// Return error result with legacy compatibility
const errorResult = {
enhanced_quality: {
overall_score: 0,
passed: false,
confidence: 'low',
processing_time_ms: Date.now() - startTime,
can_refine: false
},
quality_details: {
category_scores: {},
rule_results: [],
critical_issues: [`Validation failed: ${parseError instanceof Error ? parseError.message : String(parseError)}`],
recommendations: ['Fix response format and try again'],
refinement_suggestions: []
},
legacy_quality: {
valid: false,
quality_score: 0,
validation_details: {
format_valid: false,
error: parseError instanceof Error ? parseError.message : String(parseError)
},
recommendations: ['Fix response format and retry validation']
},
format_validation: {
valid: false,
error: parseError instanceof Error ? parseError.message : String(parseError)
},
validation_metadata: {
validation_time_ms: Date.now() - startTime,
validator_version: '3.0',
rules_applied: 0
}
};
return {
content: [
{
type: 'text',
text: JSON.stringify(errorResult, null, 2),
},
],
};
}
}
/**
* Create a validation error response when JSON parsing fails
*/
createValidationErrorResponse(error, processingTime, fixesAttempted) {
return {
content: [
{
type: 'text',
text: JSON.stringify({
enhanced_quality: {
overall_score: 0,
passed: false,
confidence: 'low',
processing_time_ms: processingTime,
can_refine: false
},
quality_details: {
category_scores: {},
rule_results: [],
critical_issues: [`JSON parsing failed: ${error}`],
recommendations: ['Fix response format and try again'],
refinement_suggestions: [],
fixes_attempted: fixesAttempted
},
legacy_quality: {
valid: false,
quality_score: 0,
validation_details: {
format_valid: false,
error: error
},
recommendations: ['Fix response format and retry validation']
},
format_validation: {
valid: false,
error: error,
fixes_attempted: fixesAttempted
},
validation_metadata: {
validation_time_ms: processingTime,
validator_version: '3.0-forgiving',