UNPKG

vibe-coder-mcp

Version:

Production-ready MCP server with complete agent integration, multi-transport support, and comprehensive development automation tools for AI-assisted workflows.

853 lines (849 loc) 53.7 kB
// NOTE(review): this is a published build artifact viewed through UNPKG; the
// line breaks below are display artifacts of the viewer and are preserved
// verbatim (one even falls inside a string literal — do not "fix" it here,
// fix formatting in the upstream source instead).
// Module wiring: shared LLM helpers, config loader, logger, and the
// per-phase prompt builders/validators used by the Context Curator pipeline.
import { performFormatAwareLlmCallWithCentralizedConfig, intelligentJsonParse } from '../../../utils/llmHelper.js'; import { ContextCuratorConfigLoader } from './config-loader.js'; import logger from '../../../logger.js'; import { buildIntentAnalysisPrompt, INTENT_ANALYSIS_SYSTEM_PROMPT, validateIntentAnalysisResponse, getIntentAnalysisTaskId } from '../prompts/intent-analysis.js'; import { buildPromptRefinementPrompt, PROMPT_REFINEMENT_SYSTEM_PROMPT, getPromptRefinementTaskId } from '../prompts/prompt-refinement.js'; import { buildFileDiscoveryPrompt, FILE_DISCOVERY_SYSTEM_PROMPT, validateFileDiscoveryResponse, getFileDiscoveryTaskId } from '../prompts/file-discovery.js'; import { buildRelevanceScoringPrompt, RELEVANCE_SCORING_SYSTEM_PROMPT, validateRelevanceScoringResponse, enhanceRelevanceScoringResponse, getRelevanceScoringTaskId } from '../prompts/relevance-scoring.js'; import { preprocessRelevanceScoringResponse } from '../utils/json-preprocessing.js'; import { buildMetaPromptGenerationPrompt, META_PROMPT_GENERATION_SYSTEM_PROMPT, validateMetaPromptGenerationResponse, attemptResponseRecovery, getMetaPromptGenerationTaskId } from '../prompts/meta-prompt-generation.js';
// Singleton service orchestrating all Context Curator LLM phases.
// `instance` holds the lazily created singleton; `configLoader` resolves the
// per-phase model configuration.
export class ContextCuratorLLMService { static instance = null; configLoader; constructor() { this.configLoader = ContextCuratorConfigLoader.getInstance(); }
// Lazily create and return the shared singleton instance.
static getInstance() { if (!ContextCuratorLLMService.instance) { ContextCuratorLLMService.instance = new ContextCuratorLLMService(); } return ContextCuratorLLMService.instance; }
// LLM call with up to `maxRetries` attempts. Errors classified as retryable
// (see isRetryableNetworkError) back off exponentially (2^attempt * 1000 ms)
// before the next attempt; non-retryable errors and the final failed attempt
// are logged with diagnostics and rethrown. `config` is accepted but unused
// here — the call delegates to the centralized-config helper.
async performResilientLlmCall(prompt, systemPrompt, config, taskId, expectedFormat = 'json', maxRetries = 3) { let lastError = null; for (let attempt = 1; attempt <= maxRetries; attempt++) { try { return await performFormatAwareLlmCallWithCentralizedConfig(prompt, systemPrompt, taskId, expectedFormat); } catch (error) { lastError = error; if (this.isRetryableNetworkError(error) && attempt < maxRetries) { const backoffMs = Math.pow(2, attempt) * 1000; logger.warn({ 
taskId, attempt, maxRetries, backoffMs, error: error instanceof Error ? error.message : String(error), errorType: this.categorizeNetworkError(error) }, 'Context Curator: Network error, retrying with exponential backoff'); await new Promise(resolve => setTimeout(resolve, backoffMs)); continue; } if (attempt === maxRetries) { logger.error({ taskId, totalAttempts: maxRetries, finalError: error instanceof Error ? error.message : String(error), errorType: this.categorizeNetworkError(error), suggestedAction: this.getSuggestedAction(error), networkDiagnostics: { sslConfigured: true, retryableError: this.isRetryableNetworkError(error), errorCategory: this.categorizeNetworkError(error) } }, 'Context Curator: All retry attempts failed'); } throw error; } } throw lastError || new Error('Unexpected error in resilient LLM call'); }
// Heuristic retryability check: substring match on the lowercased error
// message for SSL/TLS, pipe/reset/refused, timeout, and generic
// network/socket/connection indicators. Non-Error values are not retryable.
isRetryableNetworkError(error) { if (!(error instanceof Error)) return false; const message = error.message.toLowerCase(); return (message.includes('ssl') || message.includes('tls') || message.includes('epipe') || message.includes('econnreset') || message.includes('econnrefused') || message.includes('bad record mac') || message.includes('timeout') || message.includes('network') || message.includes('socket') || message.includes('connection')); }
// Map an error message onto a coarse category string used only in log
// payloads ('ssl_tls_error' | 'connection_reset' | 'timeout' |
// 'connection_refused' | 'network_error' | 'unknown').
categorizeNetworkError(error) { if (!(error instanceof Error)) return 'unknown'; const message = error.message.toLowerCase(); if (message.includes('ssl') || message.includes('tls') || message.includes('bad record mac')) { return 'ssl_tls_error'; } if (message.includes('epipe') || message.includes('econnreset')) { return 'connection_reset'; } if (message.includes('timeout')) { return 'timeout'; } if (message.includes('econnrefused')) { return 'connection_refused'; } return 'network_error'; }
// Human-readable remediation hint for the error category; used in logs only.
// (The string below legitimately spans a display line break.)
getSuggestedAction(error) { const errorType = this.categorizeNetworkError(error); switch (errorType) { case 'ssl_tls_error': return 'Check network connectivity and SSL/TLS configuration. 
Possible proxy or firewall interference.'; case 'connection_reset': return 'Network connection was reset. Check internet connectivity and API endpoint availability.'; case 'timeout': return 'Request timed out. Check network speed and API response times.'; case 'connection_refused': return 'Connection refused by server. Check API endpoint URL and availability.'; default: return 'General network error. Check internet connectivity and try again.'; } }
// Phase 1 — intent analysis. Builds the intent-analysis prompt, performs a
// direct (non-resilient) JSON LLM call, parses and validates the response,
// and rethrows any failure wrapped with context. `config` is accepted but
// not referenced in the body.
async performIntentAnalysis(userPrompt, codemapContent, config, additionalContext) { const taskId = getIntentAnalysisTaskId(); const model = this.configLoader.getLLMModel('intent_analysis'); logger.info({ taskId, model }, 'Starting intent analysis'); try { const userPromptContent = buildIntentAnalysisPrompt(userPrompt, codemapContent, additionalContext); const responseText = await performFormatAwareLlmCallWithCentralizedConfig(userPromptContent, INTENT_ANALYSIS_SYSTEM_PROMPT, taskId, 'json'); const response = intelligentJsonParse(responseText, taskId); if (!validateIntentAnalysisResponse(response)) { throw new Error('Invalid intent analysis response format'); } const typedResponse = response; logger.info({ taskId, taskType: typedResponse.taskType, confidence: typedResponse.confidence }, 'Intent analysis completed successfully'); return response; } catch (error) { logger.error({ taskId, error: error instanceof Error ? error.message : 'Unknown error' }, 'Intent analysis failed'); throw new Error(`Intent analysis failed: ${error instanceof Error ? 
error.message : 'Unknown error'}`); } }
// Phase 2 — prompt refinement. Uses the resilient LLM call, then defensively
// normalizes the parsed response: `refinedPrompt` must be a non-empty string;
// `enhancementReasoning` and `addedContext` are coerced to string arrays
// (string -> [string], other truthy -> [String(value)] with a warning), and
// default reasoning is synthesized when the model omitted it. Improvement
// metrics come from a dynamic import of the prompt-refinement module.
async performPromptRefinement(originalPrompt, intentAnalysis, codemapContent, config, additionalContext) { const taskId = getPromptRefinementTaskId(); const model = this.configLoader.getLLMModel('prompt_refinement'); logger.info({ taskId, model, taskType: intentAnalysis.taskType }, 'Starting prompt refinement'); try { const userPromptContent = buildPromptRefinementPrompt(originalPrompt, intentAnalysis, codemapContent, additionalContext); const responseText = await this.performResilientLlmCall(userPromptContent, PROMPT_REFINEMENT_SYSTEM_PROMPT, config, taskId, 'json', 3); const response = intelligentJsonParse(responseText, taskId); if (!response || typeof response !== 'object') { logger.error({ taskId, response: typeof response }, 'Prompt refinement response is not an object'); throw new Error('Invalid prompt refinement response format: not an object'); } const obj = response; logger.debug({ taskId, responseKeys: Object.keys(obj), refinedPromptType: typeof obj.refinedPrompt, enhancementReasoningType: typeof obj.enhancementReasoning, enhancementReasoningIsArray: Array.isArray(obj.enhancementReasoning), enhancementReasoningLength: Array.isArray(obj.enhancementReasoning) ? 
obj.enhancementReasoning.length : 'not array', addedContextType: typeof obj.addedContext, addedContextIsArray: Array.isArray(obj.addedContext), responsePreview: JSON.stringify(obj).substring(0, 500) }, 'Prompt refinement response structure analysis'); if (typeof obj.refinedPrompt !== 'string' || obj.refinedPrompt.length === 0) { logger.error({ taskId, refinedPromptType: typeof obj.refinedPrompt, refinedPromptValue: obj.refinedPrompt }, 'Invalid refinedPrompt field'); throw new Error('Invalid prompt refinement response format: missing or invalid refinedPrompt'); }
// Coerce enhancementReasoning to a non-empty string array; if still empty,
// generate default reasoning plus keyword-triggered extras (cli/test/error).
let enhancementReasoning = []; if (Array.isArray(obj.enhancementReasoning)) { enhancementReasoning = obj.enhancementReasoning.filter(item => typeof item === 'string' && item.length > 0); } else if (typeof obj.enhancementReasoning === 'string') { enhancementReasoning = [obj.enhancementReasoning]; } else if (obj.enhancementReasoning) { logger.warn({ taskId, enhancementReasoningType: typeof obj.enhancementReasoning, enhancementReasoningValue: obj.enhancementReasoning }, 'Unexpected enhancementReasoning format, attempting to convert'); enhancementReasoning = [String(obj.enhancementReasoning)]; } if (enhancementReasoning.length === 0) { logger.info({ taskId, originalEnhancementReasoning: obj.enhancementReasoning }, 'enhancementReasoning is missing, generating default reasoning'); const refinedPrompt = obj.refinedPrompt; const originalLength = originalPrompt.length; const refinedLength = refinedPrompt.length; enhancementReasoning = [ `Enhanced prompt from ${originalLength} to ${refinedLength} characters for better clarity and specificity`, 'Added contextual information to improve development guidance', 'Structured requirements for more comprehensive implementation' ]; if (refinedPrompt.toLowerCase().includes('cli') || refinedPrompt.toLowerCase().includes('command line')) { enhancementReasoning.push('Added CLI-specific requirements and interface specifications'); } if (refinedPrompt.toLowerCase().includes('test') 
|| refinedPrompt.toLowerCase().includes('testing')) { enhancementReasoning.push('Included testing requirements and quality assurance guidelines'); } if (refinedPrompt.toLowerCase().includes('error') || refinedPrompt.toLowerCase().includes('handling')) { enhancementReasoning.push('Enhanced error handling and robustness requirements'); } logger.debug({ taskId, generatedReasoningCount: enhancementReasoning.length }, 'Generated default enhancement reasoning'); }
// Coerce addedContext to a string array, then assemble the complete
// response with computed improvement metrics and contextual enhancements.
let addedContext = []; if (Array.isArray(obj.addedContext)) { addedContext = obj.addedContext.filter(item => typeof item === 'string'); } else if (typeof obj.addedContext === 'string') { addedContext = [obj.addedContext]; } else if (obj.addedContext) { logger.warn({ taskId, addedContextType: typeof obj.addedContext, addedContextValue: obj.addedContext }, 'Unexpected addedContext format, attempting to convert'); addedContext = [String(obj.addedContext)]; } const enhancementCount = enhancementReasoning.length; const { calculateImprovementMetrics, extractContextualEnhancements } = await import('../prompts/prompt-refinement.js'); const metrics = calculateImprovementMetrics(originalPrompt, obj.refinedPrompt, enhancementCount); const contextualEnhancements = extractContextualEnhancements(enhancementReasoning, addedContext); const completeResponse = { refinedPrompt: obj.refinedPrompt, enhancementReasoning: enhancementReasoning, addedContext: addedContext, originalLength: metrics.originalLength, refinedLength: metrics.refinedLength, improvementScore: metrics.improvementScore, contextualEnhancements }; logger.info({ taskId, refinedLength: completeResponse.refinedPrompt.length, enhancementCount: completeResponse.enhancementReasoning.length, addedContextCount: completeResponse.addedContext.length, improvementScore: completeResponse.improvementScore, contextualEnhancements: completeResponse.contextualEnhancements }, 'Prompt refinement completed successfully'); return completeResponse; } catch (error) { logger.error({ 
taskId, error: error instanceof Error ? error.message : 'Unknown error' }, 'Prompt refinement failed'); throw new Error(`Prompt refinement failed: ${error instanceof Error ? error.message : 'Unknown error'}`); } }
// Phase 3 — file discovery. Resilient JSON LLM call driven by the given
// search strategy (default 'semantic_similarity'). If validation of the
// parsed response fails, attempts a repair pass via fixAbstractFileNames
// (re-validating the result) before giving up with an error. Failures are
// rethrown with the strategy name in the message.
async performFileDiscovery(originalPrompt, intentAnalysis, codemapContent, config, searchStrategy = 'semantic_similarity', additionalContext) { const taskId = getFileDiscoveryTaskId(searchStrategy); const model = this.configLoader.getLLMModel('file_discovery'); logger.info({ taskId, model, searchStrategy, taskType: intentAnalysis.taskType }, 'Starting file discovery'); try { const userPromptContent = buildFileDiscoveryPrompt(originalPrompt, intentAnalysis, codemapContent, searchStrategy, additionalContext); const responseText = await this.performResilientLlmCall(userPromptContent, FILE_DISCOVERY_SYSTEM_PROMPT, config, taskId, 'json', 3); logger.debug({ taskId, responseLength: responseText.length, responsePreview: responseText.substring(0, 500), searchStrategy }, 'File discovery LLM response received'); const response = intelligentJsonParse(responseText, taskId); logger.info({ taskId, parsedResponseType: typeof response, parsedResponseKeys: response && typeof response === 'object' ? Object.keys(response) : 'not an object', hasRelevantFiles: response && typeof response === 'object' && 'relevantFiles' in response, relevantFilesLength: response && typeof response === 'object' && 'relevantFiles' in response && Array.isArray(response.relevantFiles) ? response.relevantFiles.length : 'not an array', formatCorrected: response && typeof response === 'object' && 'relevantFiles' in response && 'searchStrategy' in response, searchStrategy: response && typeof response === 'object' && 'searchStrategy' in response ? 
response.searchStrategy : 'not present' }, 'File discovery response parsed and format corrections applied'); if (!validateFileDiscoveryResponse(response)) { logger.error({ taskId, response: JSON.stringify(response, null, 2), validationDetails: { hasRelevantFiles: response && typeof response === 'object' && 'relevantFiles' in response, relevantFilesType: response && typeof response === 'object' && 'relevantFiles' in response ? typeof response.relevantFiles : 'missing', relevantFilesIsArray: response && typeof response === 'object' && 'relevantFiles' in response ? Array.isArray(response.relevantFiles) : false, firstFileExample: response && typeof response === 'object' && 'relevantFiles' in response && Array.isArray(response.relevantFiles) && response.relevantFiles.length > 0 ? response.relevantFiles[0] : 'no files', requiredFields: ['relevantFiles', 'totalFilesAnalyzed', 'processingTimeMs', 'searchStrategy', 'coverageMetrics'], presentFields: response && typeof response === 'object' ? Object.keys(response) : [] } }, 'File discovery response validation failed - DETAILED DEBUG'); const fixedResponse = this.fixAbstractFileNames(response, codemapContent, taskId); if (fixedResponse && validateFileDiscoveryResponse(fixedResponse)) { logger.info({ taskId, originalFiles: response && typeof response === 'object' && 'relevantFiles' in response && Array.isArray(response.relevantFiles) ? response.relevantFiles.map((f) => f.path) : [], fixedFiles: fixedResponse.relevantFiles.map((f) => f.path) }, 'Successfully fixed abstract file names'); return fixedResponse; } throw new Error('Invalid file discovery response format'); } const typedResponse = response; logger.info({ taskId, filesFound: typedResponse.relevantFiles.length, totalAnalyzed: typedResponse.totalFilesAnalyzed, strategy: typedResponse.searchStrategy }, 'File discovery completed successfully'); return response; } catch (error) { logger.error({ taskId, searchStrategy, error: error instanceof Error ? 
error.message : 'Unknown error', errorStack: error instanceof Error ? error.stack : undefined, errorName: error instanceof Error ? error.name : undefined }, 'File discovery failed - DETAILED ERROR'); throw new Error(`File discovery failed for strategy ${searchStrategy}: ${error instanceof Error ? error.message : 'Unknown error'}`); } }
// Repair pass for responses whose file entries use abstract names instead of
// real paths. Extracts candidate file paths from the codemap text with two
// /g regexes (exec loops advance lastIndex statefully), then rewrites each
// entry whose `path` lacks both '/' and '.' to the best matching real path
// (see findBestFilePathMatch). Returns the patched response, or null when
// the input shape is wrong or an error occurs (errors are logged).
fixAbstractFileNames(response, codemapContent, taskId) { try { if (!response || typeof response !== 'object' || !Array.isArray(response.relevantFiles)) { return null; } const filePathRegex = /(?:^|\s)([a-zA-Z0-9_-]+(?:\/[a-zA-Z0-9_.-]+)*\.[a-zA-Z0-9]+)(?:\s|$)/gm; const actualFilePaths = new Set(); let match; while ((match = filePathRegex.exec(codemapContent)) !== null) { actualFilePaths.add(match[1]); } const directoryPathRegex = /(?:^|\n)\s*([a-zA-Z0-9_-]+(?:\/[a-zA-Z0-9_.-]+)*\.[a-zA-Z0-9]+)/gm; while ((match = directoryPathRegex.exec(codemapContent)) !== null) { actualFilePaths.add(match[1]); } const actualFilePathsArray = Array.from(actualFilePaths); logger.debug({ taskId, extractedPaths: actualFilePathsArray.slice(0, 10), totalPaths: actualFilePathsArray.length }, 'Extracted actual file paths from codemap'); const fixedFiles = response.relevantFiles.map((file) => { if (!file || typeof file !== 'object' || typeof file.path !== 'string') { return file; } const originalPath = file.path; if (originalPath.includes('/') && originalPath.includes('.')) { return file; } const matchingPath = this.findBestFilePathMatch(originalPath, actualFilePathsArray); if (matchingPath) { logger.debug({ taskId, originalPath, matchingPath }, 'Fixed abstract file name'); return { ...file, path: matchingPath }; } return file; }); return { ...response, relevantFiles: fixedFiles }; } catch (error) { logger.error({ taskId, error: error instanceof Error ? 
error.message : String(error) }, 'Error fixing abstract file names'); return null; } }
// Match an abstract file name to a real path. Names are normalized to
// lowercase alphanumerics, then matched in three passes of decreasing
// strictness: exact basename match, basename substring match (either
// direction), full-path substring match. Returns null when nothing matches.
findBestFilePathMatch(abstractName, actualPaths) { const normalizedAbstract = abstractName.toLowerCase().replace(/[^a-z0-9]/g, ''); for (const path of actualPaths) { const fileName = path.split('/').pop()?.toLowerCase().replace(/[^a-z0-9]/g, '') || ''; if (fileName === normalizedAbstract) { return path; } } for (const path of actualPaths) { const fileName = path.split('/').pop()?.toLowerCase().replace(/[^a-z0-9]/g, '') || ''; if (fileName.includes(normalizedAbstract) || normalizedAbstract.includes(fileName)) { return path; } } for (const path of actualPaths) { const normalizedPath = path.toLowerCase().replace(/[^a-z0-9]/g, ''); if (normalizedPath.includes(normalizedAbstract) || normalizedAbstract.includes(normalizedPath)) { return path; } } return null; }
// Retry relevance scoring with an augmented prompt when the first response
// was incomplete or wrongly shaped. Returns the raw retry response text, or
// null when attempts are exhausted or the retry call itself fails (both
// outcomes are logged; callers fall back to the original response).
async retryRelevanceScoring(originalPrompt, expectedFiles, config, taskId, attempt = 1, maxAttempts = 2) { if (attempt > maxAttempts) { logger.warn({ taskId, maxAttempts, finalAttempt: attempt }, `Context Curator: Max retry attempts (${maxAttempts}) reached. 
Using original response.`); return null; } logger.info({ taskId, attempt, maxAttempts, expectedFileCount: expectedFiles.length }, `Context Curator: Retry attempt ${attempt}/${maxAttempts} for relevance scoring`); const retryPrompt = `${originalPrompt} CRITICAL RETRY INSTRUCTIONS: - Previous attempt returned incomplete response - You MUST score ALL ${expectedFiles.length} files - Return JSON array format with fileScores containing ${expectedFiles.length} objects - Do NOT return a single file object - Validate your response contains exactly ${expectedFiles.length} file scores before responding`; try { const responseText = await this.performResilientLlmCall(retryPrompt, RELEVANCE_SCORING_SYSTEM_PROMPT, config, taskId, 'json', 2); logger.info({ taskId, attempt, responseLength: responseText.length, containsFileScores: responseText.includes('fileScores') }, `Context Curator: Retry attempt ${attempt} completed`); return responseText; } catch (error) { logger.error({ taskId, attempt, error: error instanceof Error ? 
error.message : 'Unknown error' }, `Context Curator: Retry attempt ${attempt} failed`); return null; } }
// Chunked relevance scoring for large file sets. Splits the discovered files
// into chunks of `chunkSize` (default 20), scores each chunk with its own
// resilient LLM call (2 attempts per chunk), and accumulates sanitized
// per-file scores. A chunk whose response is an array of fileScores and a
// chunk that (incorrectly) returns a single file object are both handled;
// a failed chunk falls back to default scores (relevance 0.3, confidence
// 0.5) for each of its files rather than failing the whole run. Returns an
// aggregate object with fileScores, overallMetrics, and chunking metadata.
async processFilesInChunks(originalPrompt, intentAnalysis, refinedPrompt, fileDiscoveryResult, config, scoringStrategy, additionalContext, chunkSize = 20) { const files = fileDiscoveryResult.relevantFiles; const chunks = []; for (let i = 0; i < files.length; i += chunkSize) { chunks.push(files.slice(i, i + chunkSize)); } logger.info({ totalFiles: files.length, totalChunks: chunks.length, chunkSize, scoringStrategy }, `Context Curator: Processing ${files.length} files in ${chunks.length} chunks of ${chunkSize} files each`); const allFileScores = []; let totalProcessingTime = 0; for (let i = 0; i < chunks.length; i++) { const chunk = chunks[i]; logger.info({ chunkIndex: i + 1, totalChunks: chunks.length, chunkSize: chunk.length }, `Context Curator: Processing chunk ${i + 1}/${chunks.length} with ${chunk.length} files`); const chunkFileDiscoveryResult = { ...fileDiscoveryResult, relevantFiles: chunk }; try { const chunkPrompt = buildRelevanceScoringPrompt(originalPrompt, intentAnalysis, refinedPrompt, chunkFileDiscoveryResult, scoringStrategy, additionalContext); const enhancedChunkPrompt = `${chunkPrompt} CHUNK PROCESSING: This is chunk ${i + 1} of ${chunks.length} Score these ${chunk.length} files from the complete list of ${files.length} files: ${chunk.map(f => `- ${f.path}`).join('\n')} Return the same JSON format but only for these ${chunk.length} files.`; const startTime = Date.now(); const chunkResponseText = await this.performResilientLlmCall(enhancedChunkPrompt, RELEVANCE_SCORING_SYSTEM_PROMPT, config, `${getRelevanceScoringTaskId()}_chunk_${i + 1}`, 'json', 2); const chunkProcessingTime = Date.now() - startTime; totalProcessingTime += chunkProcessingTime; logger.info({ chunkIndex: i + 1, processingTime: chunkProcessingTime, responseLength: chunkResponseText.length }, `Context Curator: Chunk ${i + 1} processed in ${chunkProcessingTime}ms`); const 
preprocessedResponse = preprocessRelevanceScoringResponse(chunkResponseText, `chunk_${i + 1}`); const chunkResponse = intelligentJsonParse(preprocessedResponse, `chunk_${i + 1}`); if (chunkResponse && typeof chunkResponse === 'object') { const obj = chunkResponse; if (Array.isArray(obj.fileScores)) { allFileScores.push(...obj.fileScores.map(score => ({ filePath: String(score.filePath || ''), relevanceScore: Number(score.relevanceScore || 0), confidence: Number(score.confidence || 0), reasoning: String(score.reasoning || ''), categories: Array.isArray(score.categories) ? score.categories.map(String) : [], modificationLikelihood: (['very_high', 'high', 'medium', 'low', 'very_low'].includes(String(score.modificationLikelihood || 'low')) ? String(score.modificationLikelihood || 'low') : 'low'), estimatedTokens: Number(score.estimatedTokens || 0) }))); logger.info({ chunkIndex: i + 1, scoresAdded: obj.fileScores.length, totalScores: allFileScores.length }, `Context Curator: Added ${obj.fileScores.length} scores from chunk ${i + 1}`); } else if ('filePath' in obj && 'relevanceScore' in obj) { allFileScores.push({ filePath: String(obj.filePath || ''), relevanceScore: Number(obj.relevanceScore || 0), confidence: Number(obj.confidence || 0), reasoning: String(obj.reasoning || ''), categories: Array.isArray(obj.categories) ? obj.categories.map(String) : [], modificationLikelihood: (['very_high', 'high', 'medium', 'low', 'very_low'].includes(String(obj.modificationLikelihood || 'low')) ? String(obj.modificationLikelihood || 'low') : 'low'), estimatedTokens: Number(obj.estimatedTokens || 0) }); logger.info({ chunkIndex: i + 1, scoresAdded: 1, totalScores: allFileScores.length }, `Context Curator: Added 1 score (single file format) from chunk ${i + 1}`); } } } catch (error) { logger.warn({ chunkIndex: i + 1, chunkSize: chunk.length, error: error instanceof Error ? 
error.message : 'Unknown error' }, `Context Curator: Error processing chunk ${i + 1}, adding default scores for chunk files`); for (const file of chunk) { allFileScores.push({ filePath: file.path, relevanceScore: 0.3, confidence: 0.5, reasoning: 'Auto-generated score: Chunk processing failed', categories: ['utility'], modificationLikelihood: 'low', estimatedTokens: file.estimatedTokens }); } } } logger.info({ totalFiles: files.length, totalScored: allFileScores.length, totalProcessingTime, chunksProcessed: chunks.length }, `Context Curator: Chunked processing completed. Total files scored: ${allFileScores.length}/${files.length}`); const averageRelevance = allFileScores.length > 0 ? allFileScores.reduce((sum, f) => sum + (f.relevanceScore || 0.3), 0) / allFileScores.length : 0; const result = { fileScores: allFileScores, overallMetrics: { averageRelevance: Math.round(averageRelevance * 100) / 100, totalFilesScored: allFileScores.length, highRelevanceCount: allFileScores.filter(f => (f.relevanceScore || 0) >= 0.7).length, processingTimeMs: totalProcessingTime }, scoringStrategy: scoringStrategy, chunkingUsed: true, totalChunks: chunks.length, chunkSize: chunkSize }; return result; }
// Phase 4 — relevance scoring entry point. Delegates to processFilesInChunks
// when more than 40 files were discovered; otherwise scores all files in a
// single batch with the resilient LLM call (the body continues beyond this
// excerpt).
async performRelevanceScoring(originalPrompt, intentAnalysis, refinedPrompt, fileDiscoveryResult, config, scoringStrategy = 'semantic_similarity', additionalContext, externalTaskId) { const taskId = externalTaskId || getRelevanceScoringTaskId(); const model = this.configLoader.getLLMModel('relevance_ranking'); const fileCount = fileDiscoveryResult.relevantFiles.length; logger.info({ taskId, model, scoringStrategy, filesToScore: fileCount, thresholdCheck: fileCount > 40, chunkingRequired: fileCount > 40 ? 
// NOTE(review): interior of performRelevanceScoring; the method continues
// past the end of this excerpt, so the code is preserved verbatim.
'YES' : 'NO' }, 'Context Curator: Starting relevance scoring with threshold diagnostics'); try { if (fileCount > 40) { logger.info({ taskId, fileCount, threshold: 40, chunkSize: 20, expectedChunks: Math.ceil(fileCount / 20) }, `Context Curator: File count (${fileCount}) exceeds threshold (40). Using chunked processing.`); return this.processFilesInChunks(originalPrompt, intentAnalysis, refinedPrompt, fileDiscoveryResult, config, scoringStrategy, additionalContext, 20); } logger.info({ taskId, fileCount, threshold: 40, processingMode: 'single_batch' }, `Context Curator: File count (${fileCount}) is within threshold (40). Using single batch processing.`); const userPromptContent = buildRelevanceScoringPrompt(originalPrompt, intentAnalysis, refinedPrompt, fileDiscoveryResult, scoringStrategy, additionalContext); let responseText = await this.performResilientLlmCall(userPromptContent, RELEVANCE_SCORING_SYSTEM_PROMPT, config, taskId, 'json', 3); logger.info({ taskId, expectedFileCount: fileDiscoveryResult.relevantFiles.length, responseContainsFileScores: responseText.includes('fileScores'), responseContainsSingleFile: responseText.includes('filePath') && !responseText.includes('fileScores'), responseLength: responseText.length }, 'Context Curator: Relevance scoring response analysis'); logger.info({ taskId, responseLength: responseText.length, responsePreview: responseText.substring(0, 500), responseEnd: responseText.substring(Math.max(0, responseText.length - 200)), containsFileScores: responseText.includes('fileScores'), containsOverallMetrics: responseText.includes('overallMetrics'), containsScoringStrategy: responseText.includes('scoringStrategy'), startsWithBrace: responseText.trim().startsWith('{'), endsWithBrace: responseText.trim().endsWith('}') }, 'Relevance scoring LLM response received - DETAILED DEBUG'); const preprocessedResponse = preprocessRelevanceScoringResponse(responseText, taskId); let response = intelligentJsonParse(preprocessedResponse, taskId); 
// Detect and retry two malformed-response shapes: a single file object where
// an array was expected, and an array covering fewer than 80% of the files.
let retryAttempted = false; if (response && typeof response === 'object') { const obj = response; if ('filePath' in obj && 'relevanceScore' in obj && !('fileScores' in obj)) { logger.warn({ taskId, expectedFiles: fileDiscoveryResult.relevantFiles.length, responseType: 'single_file', retryAttempt: 'initiating' }, `Context Curator: LLM returned single file instead of array for ${fileDiscoveryResult.relevantFiles.length} files. Retrying with modified prompt.`); const expectedFiles = fileDiscoveryResult.relevantFiles.map(f => ({ path: f.path, estimatedTokens: f.estimatedTokens })); const retryResponse = await this.retryRelevanceScoring(userPromptContent, expectedFiles, config, taskId, 1); retryAttempted = true; if (retryResponse) { responseText = retryResponse; response = intelligentJsonParse(preprocessRelevanceScoringResponse(retryResponse, taskId), taskId); logger.info({ taskId, retrySuccess: true, newResponseLength: retryResponse.length, containsFileScores: retryResponse.includes('fileScores') }, 'Context Curator: Retry for single file response completed successfully'); } else { logger.warn({ taskId, retrySuccess: false, fallbackRequired: true }, 'Context Curator: Retry for single file response failed, will use fallback enhancement'); } } else if (Array.isArray(obj.fileScores) && obj.fileScores.length < fileDiscoveryResult.relevantFiles.length * 0.8) { const completionRate = Math.round(obj.fileScores.length / fileDiscoveryResult.relevantFiles.length * 100); logger.warn({ taskId, scoredFiles: obj.fileScores.length, expectedFiles: fileDiscoveryResult.relevantFiles.length, completionRate, retryAttempt: 'initiating' }, `Context Curator: LLM only scored ${obj.fileScores.length}/${fileDiscoveryResult.relevantFiles.length} files (${completionRate}%). 
Retrying.`); const expectedFiles = fileDiscoveryResult.relevantFiles.map(f => ({ path: f.path, estimatedTokens: f.estimatedTokens })); const retryResponse = await this.retryRelevanceScoring(userPromptContent, expectedFiles, config, taskId, 1); retryAttempted = true; if (retryResponse) { responseText = retryResponse; response = intelligentJsonParse(preprocessRelevanceScoringResponse(retryResponse, taskId), taskId); logger.info({ taskId, retrySuccess: true, newResponseLength: retryResponse.length, containsFileScores: retryResponse.includes('fileScores') }, 'Context Curator: Retry for incomplete array completed successfully'); } else { logger.warn({ taskId, retrySuccess: false, fallbackRequired: true }, 'Context Curator: Retry for incomplete array failed, will use fallback enhancement'); } } } const expectedFiles = fileDiscoveryResult.relevantFiles.map(f => ({ path: f.path, estimatedTokens: f.estimatedTokens })); let enhancedResponse; if (retryAttempted) { if (response && typeof response === 'object') { const obj = response; if (!('fileScores' in obj) && !('filePath' in obj)) { logger.info({ taskId, reason: 'parsing_failure_after_retry' }, 'Context Curator: Applying fallback enhancement after retry due to parsing failure'); enhancedResponse = enhanceRelevanceScoringResponse(response, scoringStrategy, fileDiscoveryResult.processingTimeMs, expectedFiles); } else { logger.warn({ taskId, reason: 'format_violation_after_retry', hasFileScores: 'fileScores' in obj, hasSingleFile: 'filePath' in obj }, 'Context Curator: Format violation persists after retry, using response as-is'); enhancedResponse = enhanceRelevanceScoringResponse(response, scoringStrategy, fileDiscoveryResult.processingTimeMs, expectedFiles); } } else { logger.warn({ taskId, reason: 'invalid_response_after_retry' }, 'Context Curator: Invalid response after retry, applying fallback enhancement'); enhancedResponse = enhanceRelevanceScoringResponse(response, scoringStrategy, 
fileDiscoveryResult.processingTimeMs, expectedFiles); } } else { logger.info({ taskId, reason: 'no_retry_needed' }, 'Context Curator: No retry needed, applying standard enhancement'); enhancedResponse = enhanceRelevanceScoringResponse(response, scoringStrategy, fileDiscoveryResult.processingTimeMs, expectedFiles); } if (!validateRelevanceScoringResponse(enhancedResponse)) { logger.error({ taskId, originalResponse: response, enhancedResponse, responseKeys: enhancedResponse && typeof enhancedResponse === 'object' ? Object.keys(enhancedResponse) : 'not an object', retryAttempted }, 'Enhanced relevance scoring response validation failed'); throw new Error('Invalid relevance scoring response format after enhancement'); } const typedResponse = enhancedResponse; logger.info({ taskId, filesScored: typedResponse.fileScores.length, averageRelevance: typedResponse.overallMetrics.averageRelevance, highRelevanceCount: typedResponse.overallMetrics.highRelevanceCount }, 'Relevance scoring completed successfully'); return typedResponse; } catch (error) { logger.error({ taskId, error: error instanceof Error ? 
error.message : 'Unknown error' }, 'Relevance scoring failed, attempting fallback scoring'); try { logger.info({ taskId, fileCount: fileDiscoveryResult.relevantFiles.length }, 'Applying fallback scoring due to LLM failure'); const fallbackScores = fileDiscoveryResult.relevantFiles.map(file => { const fallbackResult = this.calculateSimpleFallbackScore(file.path, originalPrompt); return { filePath: file.path, relevanceScore: fallbackResult.score, confidence: fallbackResult.confidence, reasoning: fallbackResult.reasoning, categories: this.extractCategoriesFromPath(file.path), modificationLikelihood: file.modificationLikelihood || 'medium', estimatedTokens: file.estimatedTokens }; }); const fallbackResponse = { fileScores: fallbackScores, overallMetrics: { averageRelevance: fallbackScores.reduce((sum, s) => sum + s.relevanceScore, 0) / fallbackScores.length, totalFilesScored: fallbackScores.length, highRelevanceCount: fallbackScores.filter(s => s.relevanceScore >= 0.7).length, processingTimeMs: 100 }, scoringStrategy: 'hybrid' }; logger.info({ taskId, filesScored: fallbackResponse.fileScores.length, averageRelevance: fallbackResponse.overallMetrics.averageRelevance, highRelevanceCount: fallbackResponse.overallMetrics.highRelevanceCount }, 'Fallback scoring completed successfully'); return fallbackResponse; } catch (fallbackError) { logger.error({ taskId, originalError: error instanceof Error ? error.message : 'Unknown error', fallbackError: fallbackError instanceof Error ? fallbackError.message : 'Unknown error' }, 'Both LLM and fallback scoring failed'); throw new Error(`Relevance scoring failed: ${error instanceof Error ? 
// (continuation from previous line: alternative branch of the wrapped-error message)
error.message : 'Unknown error'}`);
}
}
}
/**
 * Generates the meta-prompt from the analysis, refinement, and relevance
 * scoring results via a resilient LLM call.
 *
 * The raw response is parsed with intelligentJsonParse, fingerprinted for
 * diagnostics, and validated with validateMetaPromptGenerationResponse; on a
 * first validation failure, attemptResponseRecovery is given one chance to
 * coerce the payload into the expected shape before the call is abandoned.
 *
 * @param {string} originalPrompt - The user's original request.
 * @param {object} intentAnalysis - Output of the intent-analysis phase (taskType is logged).
 * @param {string} refinedPrompt - Refined version of the prompt.
 * @param {object} relevanceScoringResult - Scoring output; `fileScores` is logged/embedded.
 * @param {object} config - LLM configuration forwarded to the resilient call.
 * @param {string} [additionalContext] - Optional extra context for the prompt.
 * @returns {Promise<object>} Validated meta-prompt payload (systemPrompt, userPrompt,
 *   taskDecomposition.epics, guidelines, estimatedComplexity, qualityScore, ...).
 * @throws {Error} Wrapped 'Meta-prompt generation failed: ...' on any failure,
 *   including an invalid response format that recovery could not repair.
 */
async performMetaPromptGeneration(originalPrompt, intentAnalysis, refinedPrompt, relevanceScoringResult, config, additionalContext) {
  const taskId = getMetaPromptGenerationTaskId();
  const model = this.configLoader.getLLMModel('meta_prompt_generation');
  logger.info({ taskId, model, taskType: intentAnalysis.taskType, relevantFiles: relevanceScoringResult.fileScores.length }, 'Starting meta-prompt generation');
  try {
    const userPromptContent = buildMetaPromptGenerationPrompt(originalPrompt, intentAnalysis, refinedPrompt, relevanceScoringResult, additionalContext);
    const responseText = await this.performResilientLlmCall(userPromptContent, META_PROMPT_GENERATION_SYSTEM_PROMPT, config, taskId, 'json', 3);
    // Cheap substring probes on the raw text, before any parsing is attempted.
    logger.info({ taskId, responseContainsSystemPrompt: responseText.includes('systemPrompt'), responseContainsTaskDecomposition: responseText.includes('taskDecomposition'), responseContainsEpicsArray: responseText.includes('"epics":['), responseIsLikelySingleEpic: responseText.includes('"id":"epic-') && !responseText.includes('systemPrompt'), responseLength: responseText.length }, 'Context Curator: Meta-prompt generation response analysis');
    let response = intelligentJsonParse(responseText, taskId);
    // Structural fingerprint of the parsed payload; used only for the
    // diagnostic log lines below, never for control flow.
    const responseStructure = response && typeof response === 'object' ? {
      hasSystemPrompt: 'systemPrompt' in response,
      hasUserPrompt: 'userPrompt' in response,
      hasContextSummary: 'contextSummary' in response,
      hasTaskDecomposition: 'taskDecomposition' in response,
      hasGuidelines: 'guidelines' in response,
      hasEstimatedComplexity: 'estimatedComplexity' in response,
      hasQualityScore: 'qualityScore' in response,
      hasAiAgentResponseFormat: 'aiAgentResponseFormat' in response,
      hasEpicsArray: response && typeof response === 'object' && 'taskDecomposition' in response && response.taskDecomposition && typeof response.taskDecomposition === 'object' && 'epics' in response.taskDecomposition ? Array.isArray(response.taskDecomposition.epics) : false,
      isSingleEpic: 'id' in response && 'title' in response && 'tasks' in response,
      topLevelKeys: Object.keys(response),
      taskDecompositionKeys: response && typeof response === 'object' && 'taskDecomposition' in response && response.taskDecomposition && typeof response.taskDecomposition === 'object' ? Object.keys(response.taskDecomposition) : []
    } : null;
    logger.info({ taskId, parsedResponseType: typeof response, parsedResponseKeys: response && typeof response === 'object' ? Object.keys(response) : 'not an object', responseStructure, responsePreview: typeof response === 'object' ? JSON.stringify(response).substring(0, 500) : String(response).substring(0, 500), rawResponseLength: responseText.length, rawResponsePreview: responseText.substring(0, 300) }, 'Meta-prompt generation response parsed');
    logger.debug({ taskId, responseStructure, responseLength: responseText.length, responsePreview: responseText.substring(0, 500) }, 'Meta-prompt response structure analysis');
    if (!validateMetaPromptGenerationResponse(response)) {
      // First validation failed: give attemptResponseRecovery one chance to
      // reshape a near-miss payload (the probes above suggest a bare single
      // epic is one such shape) before treating the call as failed.
      logger.warn({ taskId, originalResponseKeys: response && typeof response === 'object' ? Object.keys(response) : 'not an object' }, 'Initial validation failed, attempting response recovery');
      const recoveredResponse = attemptResponseRecovery(response);
      if (validateMetaPromptGenerationResponse(recoveredResponse)) {
        logger.info({ taskId, recoveredResponseKeys: recoveredResponse && typeof recoveredResponse === 'object' ? Object.keys(recoveredResponse) : 'not an object' }, 'Response recovery successful');
        response = recoveredResponse;
      } else {
        logger.error({ taskId, originalResponse: typeof response === 'object' ? JSON.stringify(response, null, 2) : response, recoveredResponse: typeof recoveredResponse === 'object' ? JSON.stringify(recoveredResponse, null, 2) : recoveredResponse, responseType: typeof response, responseKeys: response && typeof response === 'object' ? Object.keys(response) : 'not an object' }, 'Meta-prompt generation response validation failed even after recovery attempt');
        throw new Error('Invalid meta-prompt generation response format');
      }
    }
    // finalResponse aliases the (possibly recovered) response for the summary log.
    const finalResponse = response;
    logger.info({ taskId, qualityScore: finalResponse.qualityScore, complexity: finalResponse.estimatedComplexity, epicsCount: finalResponse.taskDecomposition.epics.length, guidelinesCount: finalResponse.guidelines.length }, 'Meta-prompt generation completed successfully');
    return response;
  } catch (error) {
    logger.error({ taskId, error: error instanceof Error ? error.message : 'Unknown error' }, 'Meta-prompt generation failed');
    throw new Error(`Meta-prompt generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
  }
}
// Simple heuristic scorer used by the relevance-scoring fallback path when
// the LLM call fails; its body continues beyond this chunk.
calculateSimpleFallbackScore(filePath, userPrompt) { let score =