vibe-coder-mcp
Production-ready MCP server with complete agent integration, multi-transport support, and comprehensive development automation tools for AI-assisted workflows.
1,016 lines (993 loc) • 78.8 kB
JavaScript
import { performFormatAwareLlmCall } from '../../../utils/llmHelper.js';
import { AtomicTaskDetector } from './atomic-detector.js';
import { getPrompt } from '../services/prompt-service.js';
import { getTimeoutManager } from '../utils/timeout-manager.js';
import { getIdGenerator } from '../utils/id-generator.js';
import logger from '../../../logger.js';
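/**
 * Per-task circuit breaker for decomposition. Tracks attempts, failures, and
 * a cooldown window per task id so a repeatedly failing task is eventually
 * treated as atomic instead of being retried forever. recordSuccess() clears
 * all counters for a task; reset() with no argument clears every task.
 *
 * A minimal usage sketch with a hypothetical task id:
 * @example
 * const breaker = new DecompositionCircuitBreaker(3, 2, 15000);
 * if (breaker.canAttempt('T0001')) {
 *   breaker.recordAttempt('T0001');
 *   try {
 *     // ... attempt decomposition ...
 *     breaker.recordSuccess('T0001');
 *   } catch {
 *     breaker.recordFailure('T0001');
 *   }
 * }
 */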
class DecompositionCircuitBreaker {
attempts = new Map();
failures = new Map();
lastAttempt = new Map();
maxAttempts;
maxFailures;
cooldownPeriod;
constructor(maxAttempts = 3, maxFailures = 2, cooldownPeriod = 60000) {
this.maxAttempts = maxAttempts;
this.maxFailures = maxFailures;
this.cooldownPeriod = cooldownPeriod;
}
canAttempt(taskId) {
const attempts = this.attempts.get(taskId) || 0;
const failures = this.failures.get(taskId) || 0;
const lastAttemptTime = this.lastAttempt.get(taskId) || 0;
const now = Date.now();
if (lastAttemptTime > 0 && (now - lastAttemptTime) < this.cooldownPeriod) {
logger.warn({ taskId, cooldownRemaining: this.cooldownPeriod - (now - lastAttemptTime) }, 'Task decomposition in cooldown period');
return false;
}
if (attempts >= this.maxAttempts) {
logger.warn({ taskId, attempts, maxAttempts: this.maxAttempts }, 'Task decomposition max attempts reached');
return false;
}
if (failures >= this.maxFailures) {
logger.warn({ taskId, failures, maxFailures: this.maxFailures }, 'Task decomposition max failures reached');
return false;
}
return true;
}
recordAttempt(taskId) {
const attempts = this.attempts.get(taskId) || 0;
this.attempts.set(taskId, attempts + 1);
this.lastAttempt.set(taskId, Date.now());
}
recordFailure(taskId) {
const failures = this.failures.get(taskId) || 0;
this.failures.set(taskId, failures + 1);
}
recordSuccess(taskId) {
this.attempts.delete(taskId);
this.failures.delete(taskId);
this.lastAttempt.delete(taskId);
}
getStats(taskId) {
return {
attempts: this.attempts.get(taskId) || 0,
failures: this.failures.get(taskId) || 0,
canAttempt: this.canAttempt(taskId)
};
}
reset(taskId) {
if (taskId) {
this.attempts.delete(taskId);
this.failures.delete(taskId);
this.lastAttempt.delete(taskId);
}
else {
this.attempts.clear();
this.failures.clear();
this.lastAttempt.clear();
}
}
}
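/**
 * Recursive Decomposition and Delegation (RDD) engine. Prefers an epic-first
 * strategy, falls back to a flat LLM split, and bounds recursion with a depth
 * limit plus the circuit breaker above.
 *
 * A minimal usage sketch; `llmConfig`, `task`, and `projectContext` stand in
 * for the caller's own objects and are not defined in this file:
 * @example
 * const engine = new RDDEngine(llmConfig, { maxDepth: 3, minConfidence: 0.85 });
 * const result = await engine.decomposeTask(task, projectContext);
 * if (!result.isAtomic) {
 *   console.log(`Decomposed into ${result.subTasks.length} sub-tasks`);
 * }
 */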
export class RDDEngine {
config;
atomicDetector;
rddConfig;
activeOperations = new Map();
circuitBreaker;
constructor(config, rddConfig) {
this.config = config;
this.rddConfig = {
maxDepth: 5,
maxSubTasks: 400,
minConfidence: 0.8,
enableParallelDecomposition: false,
epicTimeLimit: 400,
...rddConfig
};
this.atomicDetector = new AtomicTaskDetector(config, {
epicTimeLimit: this.rddConfig.epicTimeLimit
});
this.circuitBreaker = new DecompositionCircuitBreaker(3, 2, 15000);
this.initializeCentralizedConfig().catch(error => {
logger.debug({ err: error }, 'Could not load centralized RDD config, using defaults');
});
}
async initializeCentralizedConfig() {
try {
const { getVibeTaskManagerConfig } = await import('../utils/config-loader.js');
const vibeConfig = await getVibeTaskManagerConfig();
if (vibeConfig?.taskManager?.rddConfig) {
const configBasedRDD = vibeConfig.taskManager.rddConfig;
this.rddConfig = {
...this.rddConfig,
// Centralized values must override the constructor defaults; with the
// previous spread order the defaults always won and the centralized
// config was silently ignored despite the "applied" log below.
...configBasedRDD
};
this.atomicDetector = new AtomicTaskDetector(this.config, {
epicTimeLimit: this.rddConfig.epicTimeLimit
});
logger.debug({
finalConfig: this.rddConfig,
source: 'centralized_config_merged'
}, 'Applied centralized RDD configuration');
}
}
catch (error) {
logger.debug({ err: error }, 'Could not load centralized RDD config, using defaults');
}
}
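/**
 * Decompose a task recursively. Order of checks: circuit breaker, depth
 * limit, atomicity analysis, epic-first decomposition, then a traditional
 * flat split. Always resolves with a result object carrying `isAtomic`,
 * `subTasks`, and the atomicity `analysis`, even on protected early exits.
 */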
async decomposeTask(task, context, depth = 0) {
const operationId = `decompose-${task.id}-${Date.now()}`;
this.trackOperation(operationId, 'decomposition', task.id);
if (!this.circuitBreaker.canAttempt(task.id)) {
const stats = this.circuitBreaker.getStats(task.id);
logger.warn({
taskId: task.id,
depth,
circuitBreakerStats: stats
}, 'Circuit breaker preventing decomposition attempt');
this.completeOperation(operationId);
return {
success: true,
isAtomic: true,
originalTask: task,
subTasks: [],
analysis: {
isAtomic: true,
confidence: 0.9,
reasoning: 'Task marked as atomic due to circuit breaker protection (too many failed decomposition attempts)',
estimatedHours: task.estimatedHours,
complexityFactors: ['circuit_breaker_protection', 'decomposition_failure_limit'],
recommendations: ['Manual task breakdown recommended', 'Review task complexity']
},
error: 'Circuit breaker protection activated',
depth
};
}
this.circuitBreaker.recordAttempt(task.id);
logger.info({ taskId: task.id, depth, operationId }, 'Starting RDD decomposition');
try {
if (depth >= this.rddConfig.maxDepth) {
logger.warn({ taskId: task.id, depth }, 'Maximum decomposition depth reached');
// Close out the tracked operation on this early-exit path as well;
// previously it was left open here.
this.completeOperation(operationId);
return {
success: true,
isAtomic: true,
originalTask: task,
subTasks: [],
analysis: await this.atomicDetector.analyzeTask(task, context),
depth
};
}
this.emitTaskProgress(task.id, 'decomposition', 'progress', 25, 'Analyzing task atomicity');
const analysis = await this.atomicDetector.analyzeTask(task, context);
if (analysis.isAtomic && analysis.confidence >= this.rddConfig.minConfidence) {
logger.info({ taskId: task.id, confidence: analysis.confidence }, 'Task determined to be atomic');
this.circuitBreaker.recordSuccess(task.id);
this.completeOperation(operationId);
return {
success: true,
isAtomic: true,
originalTask: task,
subTasks: [],
analysis,
depth
};
}
this.emitTaskProgress(task.id, 'decomposition', 'progress', 50, 'Using epic-first decomposition strategy');
logger.info({ taskId: task.id }, 'Attempting epic-first decomposition as primary strategy');
try {
const epicResult = await this.decomposeTaskWithEpics(task, context, depth);
if (epicResult.success && epicResult.subTasks && epicResult.subTasks.length > 0) {
logger.info({
taskId: task.id,
epicCount: epicResult.subTasks.length,
functionalAreas: [...new Set(epicResult.subTasks.map(t => t.functionalArea))]
}, 'Epic-first decomposition successful');
this.circuitBreaker.recordSuccess(task.id);
this.completeOperation(operationId);
return epicResult;
}
}
catch (error) {
logger.warn({ taskId: task.id, error: error instanceof Error ? error.message : 'Unknown error' }, 'Epic-first decomposition failed, falling back to traditional approach');
}
this.emitTaskProgress(task.id, 'decomposition', 'progress', 60, 'Falling back to traditional decomposition');
const subTasks = await this.splitTask(task, context, analysis);
if (subTasks.length === 0) {
logger.warn({ taskId: task.id }, 'No sub-tasks generated, treating as atomic');
this.circuitBreaker.recordFailure(task.id);
this.completeOperation(operationId);
return {
success: true,
isAtomic: true,
originalTask: task,
subTasks: [],
analysis,
depth
};
}
this.emitTaskProgress(task.id, 'decomposition', 'progress', 75, 'Processing decomposed sub-tasks');
const processedSubTasks = await this.processDecomposedTasks(subTasks, context, depth + 1);
logger.info({
taskId: task.id,
decomposedTaskCount: processedSubTasks.length,
depth,
operationId
}, 'RDD decomposition completed');
this.circuitBreaker.recordSuccess(task.id);
this.completeOperation(operationId);
return {
success: true,
isAtomic: false,
originalTask: task,
subTasks: processedSubTasks,
analysis,
depth
};
}
catch (error) {
this.completeOperation(operationId);
return this.handleDecompositionFailure(task, error instanceof Error ? error : 'Unknown error', {
depth,
isRecursive: false,
operationId
});
}
}
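/**
 * Split a non-atomic task with a single LLM call, retrying up to twice on
 * malformed responses. Each retry raises the sampling temperature (0.2,
 * then 0.4, then 0.5) and appends a stricter JSON-format reminder to the
 * prompt, trading determinism for a better chance of parseable output.
 * Returns an empty array on unrecoverable failure so callers can treat
 * the task as atomic.
 */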
async splitTask(task, context, analysis) {
logger.debug({ taskId: task.id }, 'Splitting task into sub-tasks');
try {
let splitPrompt = this.buildSplitPrompt(task, context, analysis);
const systemPrompt = await getPrompt('decomposition');
const timeoutManager = getTimeoutManager();
let response = '';
let subTasks = [];
let retryCount = 0;
const maxRetries = 2;
const llmCallStartTime = Date.now();
while (retryCount <= maxRetries) {
try {
response = await timeoutManager.raceWithTimeout('llmRequest', performFormatAwareLlmCall(splitPrompt, systemPrompt, this.config, 'task_decomposition', 'json', undefined, retryCount === 0 ? 0.2 : 0.3 + (retryCount * 0.1)));
subTasks = await this.parseSplitResponse(response, task);
this.emitTaskProgress(task.id, 'decomposition', 'progress', 60, `Successfully parsed ${subTasks.length} sub-tasks from LLM response`);
const totalLlmTime = Date.now() - llmCallStartTime;
logger.info({
taskId: task.id,
retryCount,
totalLlmTime,
responseLength: response.length,
parsedTaskCount: subTasks.length,
performance: {
llmLatency: totalLlmTime,
retries: retryCount,
parseSuccess: true,
timestamp: new Date().toISOString()
}
}, 'LLM response parsing: successful task decomposition');
break;
}
catch (parseError) {
const parseErrorMessage = parseError instanceof Error ? parseError.message : 'Unknown parsing error';
const isContextualInsightsError = parseErrorMessage.includes('contextualInsights without tasks array');
const isEmptyTasksError = parseErrorMessage.includes('Empty tasks array');
const isInvalidFormatError = parseErrorMessage.includes('Invalid response format');
const isAnalysisOnlyError = parseErrorMessage.includes('analysis fields') && parseErrorMessage.includes('without tasks array');
if ((isContextualInsightsError || isEmptyTasksError || isInvalidFormatError || isAnalysisOnlyError) && retryCount < maxRetries) {
retryCount++;
const partialLlmTime = Date.now() - llmCallStartTime;
logger.warn({
taskId: task.id,
retryCount,
maxRetries,
parseError: parseErrorMessage,
retryReason: 'LLM response parsing failed, retrying with different temperature',
partialLlmTime,
responseLength: response?.length || 0,
errorType: isContextualInsightsError ? 'contextual_insights_only' :
isEmptyTasksError ? 'empty_tasks_array' :
isAnalysisOnlyError ? 'analysis_only' : 'invalid_format',
performance: {
llmLatency: partialLlmTime,
retries: retryCount,
parseSuccess: false,
errorCategory: isContextualInsightsError ? 'contextual_insights_only' :
isEmptyTasksError ? 'empty_tasks_array' : 'invalid_format',
timestamp: new Date().toISOString()
}
}, 'LLM response parsing: retry attempt due to parsing failure');
if (retryCount === 1) {
if (isAnalysisOnlyError) {
splitPrompt = `${splitPrompt}\n\nIMPORTANT: You MUST respond with a JSON object containing BOTH "contextualInsights" AND "tasks" array. Do not respond with only codebaseAlignment, researchIntegration, technologySpecifics, estimationFactors. The response must include actionable tasks in a "tasks" array.`;
}
else {
splitPrompt = `${splitPrompt}\n\nIMPORTANT: You MUST respond with a JSON object containing a "tasks" array. Do not respond with only contextualInsights or analysis. The response must include actionable tasks.`;
}
}
else if (retryCount === 2) {
splitPrompt = `${splitPrompt}\n\nCRITICAL: Respond ONLY with valid JSON format: {"contextualInsights": {"codebaseAlignment": "...", "researchIntegration": "...", "technologySpecifics": "...", "estimationFactors": "..."}, "tasks": [{"title": "...", "description": "...", "type": "...", "priority": "...", "estimatedHours": 0.1}]}. No additional text or analysis.`;
}
continue;
}
const finalLlmTime = Date.now() - llmCallStartTime;
logger.error({
taskId: task.id,
retryCount,
maxRetries,
finalLlmTime,
parseError: parseErrorMessage,
errorType: isContextualInsightsError ? 'contextual_insights_only' :
isEmptyTasksError ? 'empty_tasks_array' :
isAnalysisOnlyError ? 'analysis_only' :
isInvalidFormatError ? 'invalid_format' : 'other',
performance: {
llmLatency: finalLlmTime,
retries: retryCount,
parseSuccess: false,
finalFailure: true,
timestamp: new Date().toISOString()
}
}, 'LLM response parsing: final failure, falling back to atomic task generation');
throw parseError;
}
}
const validatedSubTasks = this.validateDecomposedTasks(subTasks, task);
logger.info({
taskId: task.id,
decomposedTaskCount: validatedSubTasks.length
}, 'Task split completed');
return validatedSubTasks;
}
catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
const isTimeout = errorMessage.includes('timeout') || errorMessage.includes('timed out');
const errorContext = {
err: error,
taskId: task.id,
operation: 'task_splitting',
taskTitle: task.title,
taskType: task.type,
taskPriority: task.priority,
estimatedHours: task.estimatedHours,
projectId: task.projectId,
epicId: task.epicId,
contextSize: context ? {
languagesCount: context.languages?.length || 0,
frameworksCount: context.frameworks?.length || 0,
complexity: context.complexity
} : null,
stackTrace: error instanceof Error ? error.stack : undefined
};
if (isTimeout) {
logger.error({
...errorContext,
timeout: true,
timeoutType: 'llm_call'
}, 'Task splitting timed out - LLM call exceeded timeout limit');
}
else {
logger.error(errorContext, 'Failed to split task - comprehensive error context captured');
}
return [];
}
}
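/**
 * Re-analyze each generated sub-task and recurse on any that are still not
 * atomic, guarded by the per-task circuit breaker, the depth limit, and a
 * recursion timeout. Tasks that cannot be decomposed further are kept as-is.
 */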
async processDecomposedTasks(decomposedTasks, context, depth) {
const processedTasks = [];
for (const task of decomposedTasks) {
if (!this.circuitBreaker.canAttempt(task.id)) {
const stats = this.circuitBreaker.getStats(task.id);
logger.warn({
taskId: task.id,
depth,
circuitBreakerStats: stats,
isRecursiveCall: true
}, 'Circuit breaker preventing recursive decomposition attempt');
processedTasks.push(task);
continue;
}
const quickAnalysis = await this.atomicDetector.analyzeTask(task, context);
if (quickAnalysis.isAtomic && quickAnalysis.confidence >= this.rddConfig.minConfidence) {
this.circuitBreaker.recordSuccess(task.id);
processedTasks.push(task);
}
else if (depth < this.rddConfig.maxDepth) {
this.circuitBreaker.recordAttempt(task.id);
try {
const timeoutManager = getTimeoutManager();
const decompositionResult = await timeoutManager.raceWithTimeout('recursiveTaskDecomposition', this.decomposeTask(task, context, depth));
if (decompositionResult.success && decompositionResult.subTasks.length > 0) {
this.circuitBreaker.recordSuccess(task.id);
processedTasks.push(...decompositionResult.subTasks);
}
else {
this.circuitBreaker.recordFailure(task.id);
logger.warn({
taskId: task.id,
depth,
isRecursiveCall: true,
decompositionSuccess: decompositionResult.success,
subTaskCount: decompositionResult.subTasks?.length || 0
}, 'Recursive decomposition failed to generate sub-tasks, keeping original task as atomic');
processedTasks.push(task);
}
}
catch (error) {
this.circuitBreaker.recordFailure(task.id);
logger.warn({
err: error,
taskId: task.id,
depth,
isRecursiveCall: true
}, 'Recursive task decomposition failed or timed out, keeping task as atomic');
processedTasks.push(task);
}
}
else {
logger.info({
taskId: task.id,
depth,
maxDepth: this.rddConfig.maxDepth
}, 'Maximum decomposition depth reached, keeping task as-is');
processedTasks.push(task);
}
}
return processedTasks;
}
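/**
 * Two-phase epic-aware decomposition: first identify functional areas
 * (epics), then generate atomic tasks within each epic boundary. Falls back
 * to decomposeTask() on failure; the resulting mutual recursion between the
 * two methods is bounded by the circuit breaker's attempt limit rather than
 * by an explicit guard.
 */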
async decomposeTaskWithEpics(task, context, depth = 0) {
const operationId = `epic-decompose-${task.id}-${Date.now()}`;
this.trackOperation(operationId, 'epic_decomposition', task.id);
logger.info({ taskId: task.id, depth, operationId }, 'Starting two-phase epic-aware decomposition');
try {
this.emitTaskProgress(task.id, 'decomposition', 'progress', 25, 'Identifying functional areas and epics');
const epicStructure = await this.identifyEpicStructure(task, context);
if (!epicStructure || epicStructure.length === 0) {
logger.warn({ taskId: task.id }, 'No epics identified, falling back to traditional decomposition');
return await this.decomposeTask(task, context, depth);
}
this.emitTaskProgress(task.id, 'decomposition', 'progress', 50, 'Generating atomic tasks within epic boundaries');
const allSubTasks = [];
for (const epic of epicStructure) {
const epicTasks = await this.generateTasksForEpic(task, epic, context);
allSubTasks.push(...epicTasks);
}
if (allSubTasks.length === 0) {
logger.warn({ taskId: task.id }, 'No tasks generated in epic-aware decomposition, falling back to traditional');
return await this.decomposeTask(task, context, depth);
}
this.emitTaskProgress(task.id, 'decomposition', 'progress', 75, 'Processing epic-aware sub-tasks');
const processedSubTasks = await this.processDecomposedTasks(allSubTasks, context, depth + 1);
logger.info({
taskId: task.id,
epicsIdentified: epicStructure.length,
tasksGenerated: processedSubTasks.length,
depth,
operationId
}, 'Two-phase epic-aware decomposition completed');
this.completeOperation(operationId);
return {
success: true,
isAtomic: false,
originalTask: task,
subTasks: processedSubTasks,
analysis: {
isAtomic: false,
confidence: 0.95,
reasoning: 'Successfully decomposed using two-phase epic-aware strategy',
estimatedHours: processedSubTasks.reduce((sum, t) => sum + t.estimatedHours, 0),
complexityFactors: ['epic_based_decomposition', 'functional_area_grouping'],
recommendations: ['Tasks organized by epic boundaries', 'Natural feature grouping applied']
},
depth
};
}
catch (error) {
this.completeOperation(operationId);
logger.warn({
err: error,
taskId: task.id,
depth
}, 'Epic-aware decomposition failed, falling back to traditional approach');
return await this.decomposeTask(task, context, depth);
}
}
async identifyEpicStructure(task, context) {
logger.debug({ taskId: task.id }, 'Identifying epic structure for task');
const epicPrompt = `Analyze this task and identify the natural functional areas and epic boundaries:
TASK TO ANALYZE:
- Title: ${task.title}
- Description: ${task.description}
- Type: ${task.type}
PROJECT CONTEXT:
- Languages: ${context.languages?.join(', ') || 'not specified'}
- Frameworks: ${context.frameworks?.join(', ') || 'not specified'}
- Build Tools: ${context.buildTools?.join(', ') || 'not specified'}
Identify all major functional areas (epics) needed for this work. Create as many epics as necessary to properly organize the project - no artificial limits.
VALID FUNCTIONAL AREAS (choose from these only):
- authentication: User login, security, and access control features
- user-management: User profiles, preferences, and account management
- content-management: Content creation, editing, and organization
- data-management: Data storage, retrieval, and processing
- integration: External API connections and third-party services
- admin: Administrative functions and system configuration
- ui-components: User interface components and interactions
- performance: Optimization, caching, and efficiency improvements
- frontend: Client-side logic, React/Vue/Angular components, UI state management
- backend: Server-side logic, APIs, business rules, middleware, services
- database: Schema design, migrations, queries, indexes, data optimization
EPIC COUNT GUIDANCE (based on project complexity):
- Small features (1-2 days work): 1-3 epics
- Medium features (1 week work): 3-7 epics
- Large features (2-4 weeks work): 7-15 epics
- Enterprise features (1+ months work): 15-30+ epics
Multiple epics can share the same functionalArea if they represent different aspects of that area.
For example: "User Authentication System" and "OAuth Integration" can both use functionalArea: "authentication".
IMPORTANT: You MUST respond with valid JSON only. Never return plain text explanations.
If unable to identify epics, return {"epics": []} instead of plain text.
The functionalArea field MUST be one of the valid functional areas listed above.
Respond with valid JSON in exactly this format:
{
"epics": [
{
"name": "Authentication System",
"functionalArea": "authentication",
"description": "User login and security features",
"priority": "high",
"estimatedComplexity": "medium"
}
]
}`;
try {
const timeoutManager = getTimeoutManager();
const response = await timeoutManager.raceWithTimeout('llmRequest', performFormatAwareLlmCall(epicPrompt, 'Identify functional areas and epic boundaries for task decomposition', this.config, 'epic_identification', 'json', undefined, 0.1));
const parsedResponse = this.parseEpicIdentificationResponse(response);
return this.validateAndTransformEpics(parsedResponse.epics);
}
catch (error) {
logger.warn({ err: error, taskId: task.id }, 'Failed to identify epic structure');
return [];
}
}
async generateTasksForEpic(originalTask, epic, _context) {
logger.debug({
taskId: originalTask.id,
epicName: epic.name,
functionalArea: epic.functionalArea
}, 'Generating tasks for epic');
const epicTaskPrompt = `Generate atomic tasks specifically for this epic:
EPIC CONTEXT:
- Epic Name: ${epic.name}
- Functional Area: ${epic.functionalArea}
- Description: ${epic.description}
- Priority: ${epic.priority}
ORIGINAL TASK:
- Title: ${originalTask.title}
- Description: ${originalTask.description}
- Type: ${originalTask.type}
Generate ALL atomic tasks needed to fully implement this epic. Do not limit the number - create as many as necessary.
Each task should be 5-10 minutes of work and truly atomic (single responsibility, one file change).
TASK COUNT GUIDANCE (based on epic complexity):
- Simple epics (basic CRUD, simple UI): 5-15 tasks
- Medium complexity epics (auth flows, integrations): 15-40 tasks
- High complexity epics (complex features, systems): 40-100 tasks
The number of tasks should reflect the actual work needed, not arbitrary limits.
Respond with valid JSON only using the enhanced format from the decomposition prompt:
{
"tasks": [
{
"title": "Create authentication middleware",
"description": "...",
"type": "development",
"priority": "high",
"estimatedHours": 0.15,
"acceptanceCriteria": ["Middleware validates JWT tokens"],
"functionalArea": "${epic.functionalArea}",
"epicContext": {
"suggestedEpicName": "${epic.name}",
"epicDescription": "${epic.description}",
"epicJustification": "Core component of ${epic.functionalArea} functionality"
}
}
]
}`;
try {
const timeoutManager = getTimeoutManager();
const response = await timeoutManager.raceWithTimeout('llmRequest', performFormatAwareLlmCall(epicTaskPrompt, 'Generate atomic tasks for specific epic in two-phase decomposition', this.config, 'epic_task_generation', 'json', undefined, 0.1));
const parsedResponse = this.parseTaskGenerationResponse(response);
return this.validateAndTransformTasks(parsedResponse.tasks, originalTask, epic);
}
catch (error) {
logger.warn({
err: error,
taskId: originalTask.id,
epicName: epic.name
}, 'Failed to generate tasks for epic');
return [];
}
}
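/**
 * Parse the epic-identification response. Tolerates a JSON object embedded
 * in surrounding prose (extracted with a greedy brace match) and degrades to
 * an empty epic list when the model answered in plain text, rather than
 * failing the whole decomposition.
 */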
parseEpicIdentificationResponse(jsonResponse) {
try {
let parsedJson;
try {
parsedJson = JSON.parse(jsonResponse);
}
catch {
const jsonMatch = jsonResponse.match(/\{[\s\S]*\}/);
if (!jsonMatch) {
if (jsonResponse.toLowerCase().includes('based on') || jsonResponse.toLowerCase().includes('unable to')) {
logger.warn('LLM returned plain text for epic identification - treating as empty epic list');
return { epics: [] };
}
throw new Error('No JSON found in epic identification response');
}
parsedJson = JSON.parse(jsonMatch[0]);
}
if (typeof parsedJson !== 'object' || parsedJson === null) {
throw new Error('Response must be an object');
}
const obj = parsedJson;
if (!('epics' in obj) || !Array.isArray(obj.epics)) {
logger.warn({
responseKeys: Object.keys(obj),
responseSnippet: JSON.stringify(obj).substring(0, 100)
}, 'Epic identification response missing epics array');
return { epics: [] };
}
const epics = obj.epics;
const validatedEpics = epics.map((epic, index) => {
if (typeof epic !== 'object' || epic === null) {
throw new Error(`Epic at index ${index} must be an object`);
}
const epicObj = epic;
if (typeof epicObj.name !== 'string') {
throw new Error(`Epic at index ${index} must have string name`);
}
if (typeof epicObj.functionalArea !== 'string') {
throw new Error(`Epic at index ${index} must have string functionalArea`);
}
if (typeof epicObj.description !== 'string') {
throw new Error(`Epic at index ${index} must have string description`);
}
return {
name: epicObj.name,
functionalArea: epicObj.functionalArea,
description: epicObj.description,
priority: typeof epicObj.priority === 'string' ? epicObj.priority : undefined,
estimatedComplexity: typeof epicObj.estimatedComplexity === 'string' ? epicObj.estimatedComplexity : undefined
};
});
return { epics: validatedEpics };
}
catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown parsing error';
throw new Error(`Failed to parse epic identification response: ${errorMessage}`);
}
}
parseTaskGenerationResponse(jsonResponse) {
try {
const parsed = JSON.parse(jsonResponse);
if (typeof parsed !== 'object' || parsed === null) {
throw new Error('Response must be an object');
}
const obj = parsed;
if (!('tasks' in obj) || !Array.isArray(obj.tasks)) {
throw new Error('Response must contain tasks array');
}
const tasks = obj.tasks;
const validatedTasks = tasks.map((task, index) => {
if (typeof task !== 'object' || task === null) {
throw new Error(`Task at index ${index} must be an object`);
}
const taskObj = task;
if (typeof taskObj.title !== 'string') {
throw new Error(`Task at index ${index} must have string title`);
}
if (typeof taskObj.description !== 'string') {
throw new Error(`Task at index ${index} must have string description`);
}
const validateStringArray = (arr, fieldName) => {
if (arr === undefined)
return undefined;
if (!Array.isArray(arr)) {
throw new Error(`Task at index ${index}: ${fieldName} must be an array`);
}
return arr.map((item, arrIndex) => {
if (typeof item !== 'string') {
throw new Error(`Task at index ${index}: ${fieldName}[${arrIndex}] must be a string`);
}
return item;
});
};
let epicContext = undefined;
if (taskObj.epicContext !== undefined) {
if (typeof taskObj.epicContext !== 'object' || taskObj.epicContext === null) {
throw new Error(`Task at index ${index}: epicContext must be an object`);
}
const epicCtx = taskObj.epicContext;
epicContext = {
suggestedEpicName: typeof epicCtx.suggestedEpicName === 'string' ? epicCtx.suggestedEpicName : undefined,
epicDescription: typeof epicCtx.epicDescription === 'string' ? epicCtx.epicDescription : undefined,
epicJustification: typeof epicCtx.epicJustification === 'string' ? epicCtx.epicJustification : undefined
};
}
return {
title: taskObj.title,
description: taskObj.description,
type: typeof taskObj.type === 'string' ? taskObj.type : undefined,
priority: typeof taskObj.priority === 'string' ? taskObj.priority : undefined,
estimatedHours: typeof taskObj.estimatedHours === 'number' ? taskObj.estimatedHours : undefined,
acceptanceCriteria: validateStringArray(taskObj.acceptanceCriteria, 'acceptanceCriteria'),
tags: validateStringArray(taskObj.tags, 'tags'),
dependencies: validateStringArray(taskObj.dependencies, 'dependencies'),
filePaths: validateStringArray(taskObj.filePaths, 'filePaths'),
functionalArea: typeof taskObj.functionalArea === 'string' ? taskObj.functionalArea : undefined,
epicContext
};
});
return { tasks: validatedTasks };
}
catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown parsing error';
throw new Error(`Failed to parse task generation response: ${errorMessage}`);
}
}
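/**
 * Normalize raw epics from the LLM: unknown functional areas fall back to
 * 'integration', and missing priority/complexity default to 'medium', so a
 * single bad field never rejects an otherwise usable epic.
 */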
validateAndTransformEpics(rawEpics) {
const validFunctionalAreas = [
'authentication', 'user-management', 'content-management', 'data-management',
'integration', 'admin', 'ui-components', 'performance',
// Keep in sync with the epic-identification prompt, which also offers
// these three areas; omitting them coerced valid epics to 'integration'.
'frontend', 'backend', 'database'
];
const validPriorities = ['low', 'medium', 'high', 'critical'];
const validComplexities = ['low', 'medium', 'high'];
return rawEpics.map((epic) => {
let functionalArea = 'integration';
if (validFunctionalAreas.includes(epic.functionalArea)) {
functionalArea = epic.functionalArea;
}
else {
logger.warn({
epicName: epic.name,
invalidFunctionalArea: epic.functionalArea,
validAreas: validFunctionalAreas,
defaultUsed: 'integration'
}, 'Invalid functional area returned by LLM, using default fallback');
}
let priority = 'medium';
if (epic.priority && validPriorities.includes(epic.priority)) {
priority = epic.priority;
}
let estimatedComplexity = 'medium';
if (epic.estimatedComplexity && validComplexities.includes(epic.estimatedComplexity)) {
estimatedComplexity = epic.estimatedComplexity;
}
return {
name: epic.name,
functionalArea,
description: epic.description,
priority,
estimatedComplexity
};
});
}
validateAndTransformTasks(rawTasks, originalTask, epic) {
const validTaskTypes = ['development', 'testing', 'documentation', 'research'];
const validPriorities = ['low', 'medium', 'high', 'critical'];
return rawTasks.map((taskData, index) => {
const type = (taskData.type && validTaskTypes.includes(taskData.type))
? taskData.type
: 'development';
const priority = (taskData.priority && validPriorities.includes(taskData.priority))
? taskData.priority
: epic.priority;
const estimatedHours = (typeof taskData.estimatedHours === 'number' && taskData.estimatedHours > 0)
? Math.min(taskData.estimatedHours, 0.17)
: 0.15;
const now = new Date();
// Scope generated ids to the epic name: a bare `-epic-${index}` suffix
// collides when several epics each produce a task at the same index.
const epicSlug = epic.name.toLowerCase().replace(/[^a-z0-9]+/g, '-');
return {
id: `${originalTask.id}-${epicSlug}-${index + 1}`,
title: taskData.title,
description: taskData.description,
status: 'pending',
priority,
type,
functionalArea: epic.functionalArea,
estimatedHours,
actualHours: undefined,
epicId: `${epic.functionalArea}-epic`,
projectId: originalTask.projectId,
dependencies: taskData.dependencies ? [...taskData.dependencies] : [],
dependents: [],
filePaths: taskData.filePaths ? [...taskData.filePaths] : [],
acceptanceCriteria: taskData.acceptanceCriteria ? [...taskData.acceptanceCriteria] : [],
testingRequirements: {
unitTests: [],
integrationTests: [],
performanceTests: [],
coverageTarget: 80
},
performanceCriteria: {
responseTime: undefined,
memoryUsage: undefined,
throughput: undefined
},
qualityCriteria: {
codeQuality: [],
documentation: [],
typeScript: true,
eslint: true
},
integrationCriteria: {
compatibility: [],
patterns: []
},
validationMethods: {
automated: [],
manual: []
},
assignedAgent: undefined,
executionContext: undefined,
createdAt: now,
updatedAt: now,
startedAt: undefined,
completedAt: undefined,
createdBy: 'decomposition-service',
tags: taskData.tags ? [...taskData.tags] : [epic.functionalArea],
metadata: {
createdAt: now,
updatedAt: now,
createdBy: 'decomposition-service',
tags: taskData.tags ? [...taskData.tags] : [epic.functionalArea]
}
};
});
}
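/**
 * Build the flat decomposition prompt. Embeds the atomicity analysis,
 * project context, and the configured epic time limit so the model's task
 * sizing matches the engine's runtime constraints.
 */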
buildSplitPrompt(task, context, analysis) {
return `Decompose the following non-atomic task into smaller, more manageable sub-tasks:
ORIGINAL TASK:
- Title: ${task.title}
- Description: ${task.description}
- Type: ${task.type}
- Priority: ${task.priority}
- Estimated Hours: ${task.estimatedHours}
- File Paths: ${(task.filePaths || []).join(', ')}
- Acceptance Criteria: ${(task.acceptanceCriteria || []).join('; ')}
ATOMICITY ANALYSIS:
- Is Atomic: ${analysis.isAtomic}
- Confidence: ${analysis.confidence}
- Reasoning: ${analysis.reasoning}
- Complexity Factors: ${(analysis.complexityFactors || []).join(', ')}
- Recommendations: ${(analysis.recommendations || []).join('; ')}
PROJECT CONTEXT:
- Languages: ${(context.languages && context.languages.length > 0 ? context.languages : ['unknown']).join(', ')}
- Frameworks: ${(context.frameworks && context.frameworks.length > 0 ? context.frameworks : ['unknown']).join(', ')}
- Tools: ${(context.tools || []).join(', ')}
- Complexity: ${context.complexity || 'unknown'}
EPIC CONSTRAINT:
- This task belongs to an epic with a maximum of ${this.rddConfig.epicTimeLimit} hours total (configurable limit supporting realistic enterprise projects)
- All generated tasks combined should not exceed the original task's estimated hours
- Aim for comprehensive task breakdown that supports realistic project scope
ATOMIC TASK REQUIREMENTS (MANDATORY):
1. DURATION: Each task must take 5-10 minutes maximum (0.08-0.17 hours)
2. SINGLE ACTION: Each task must involve exactly ONE specific action
3. ONE CRITERIA: Each task must have exactly ONE acceptance criteria
4. SINGLE FOCUS: Each task must focus on ONE thing only
5. SIMPLICITY: Each task must be simple and straightforward
6. IMMEDIATE: Each task can be started and completed immediately
7. ACTIONABLE: Each task must be a concrete, specific action
TASK GENERATION REQUIREMENTS:
1. Create 2-${this.rddConfig.maxSubTasks} TRULY ATOMIC tasks
2. Each task MUST be completable in 5-10 minutes (0.08-0.17 hours)
3. Each task MUST have exactly ONE acceptance criteria
4. Each task MUST focus on ONE specific action
5. Tasks should be as independent as possible
6. Maintain clear logical progression
7. Preserve the original task's intent and scope
8. Use specific, actionable titles
9. Provide detailed but focused descriptions
10. Support comprehensive project coverage within the ${this.rddConfig.epicTimeLimit}-hour epic scope
VALIDATION CHECKLIST (Apply to each task):
✔ Takes 5-10 minutes maximum?
✔ Involves exactly ONE action?
✔ Has exactly ONE acceptance criteria?
✔ Focuses on ONE thing only?
✔ Is simple and straightforward?
✔ Can be started immediately?
✔ Cannot be broken down into smaller tasks?
Provide your task decomposition in the following JSON format:
{
"tasks": [
{
"title": "Specific, actionable title (verb + object)",
"description": "Detailed description of the single action to take",
"type": "development|testing|documentation|research",
"priority": "low|medium|high|critical",
"estimatedHours": 0.08-0.17 (5-10 minutes in decimal hours),
"filePaths": ["specific file to modify"],
"acceptanceCriteria": ["ONE specific, testable outcome"],
"tags": ["relevant", "tags"],
"dependencies": ["T0001"] // Only if absolutely necessary
}
]
}
CRITICAL REMINDER:
- Use "tasks" not "subtasks" in your response
- If any task takes more than 10 minutes, break it down further!
- Ensure total time of all tasks doesn't exceed the epic's ${this.rddConfig.epicTimeLimit}-hour limit
- ALWAYS respond with valid JSON, never plain text
- If unable to decompose, return {"tasks": []} rather than plain text`;
}
validateTaskStructure(task) {
const requiredFields = ['title', 'description', 'type', 'priority', 'estimatedHours'];
return requiredFields.every(field => Object.prototype.hasOwnProperty.call(task, field) && task[field] != null);
}
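/**
 * Classify a parsed LLM response before task extraction: a proper "tasks"
 * array, a legacy "subTasks" array, a single task object, or one of several
 * analysis-only shapes. Analysis-only responses are flagged with
 * `canConvert` so the caller can fall back to a single atomic task.
 */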
validateResponseStructure(parsed) {
const analysisFields = ['codebaseAlignment', 'researchIntegration', 'technologySpecifics', 'estimationFactors'];
const hasAnalysisFields = analysisFields.some(field => parsed[field]);
if (hasAnalysisFields && !parsed.tasks && !parsed.subTasks && !parsed.title && !parsed.contextualInsights) {
return {
isValid: false,
error: `LLM returned analysis fields (${analysisFields.filter(f => parsed[f]).join(', ')}) without tasks array. This is a malformed response.`,
canConvert: true
};
}
if (parsed.contextualInsights && !parsed.tasks && !parsed.subTasks && !parsed.title) {
return {
isValid: false,
error: `LLM returned contextualInsights without tasks array. This suggests the task may already be atomic or the LLM failed to decompose it properly. Response keys: ${Object.keys(parsed).join(', ')}`,
canConvert: false
};
}
if (parsed.tasks && Array.isArray(parsed.tasks)) {
if (parsed.tasks.length === 0) {
return { isValid: false, error: `Empty tasks array received from LLM` };
}
const invalidTasks = parsed.tasks.filter((task) => !this.validateTaskStructure(task));
if (invalidTasks.length > 0) {
const missingFields = this.getMissingFields(invalidTasks[0]);
return { isValid: false, error: `Invalid task structure in tasks array: missing required fields [${missingFields.join(', ')}] in task "${invalidTasks[0].title || 'untitled'}"` };
}
return { isValid: true };
}
if (parsed.subTasks && Array.isArray(parsed.subTasks)) {
if (parsed.subTasks.length === 0) {
return { isValid: false, error: `Empty subTasks array received from LLM` };
}
const invalidTasks = parsed.subTasks.filter((task) => !this.validateTaskStructure(task));
if (invalidTasks.length > 0) {
const missingFields = this.getMissingFields(invalidTasks[0]);
return { isValid: false, error: `Invalid task structure in subTasks array: missing required fields [${missingFields.join(', ')}] in task "${invalidTasks[0].title || 'untitled'}"` };
}
return { isValid: true };
}
if (parsed.title && parsed.description) {
if (!this.validateTaskStructure(parsed)) {
const missingFields = this.getMissingFields(parsed);
return { isValid: false, error: `Invalid single task structure: missing required fields [${missingFields.join(', ')}]` };
}
return { isValid: true };
}
const responseKeys = Object.keys(parsed);
const hasContextualInsights = !!parsed.contextualInsights;
const hasAnalysis = !!parsed.analysis;
const hasRecommendations = !!parsed.recommendations;
return {
isValid: false,
error: `Invalid response format: no tasks array or single task found. Response contains keys: [${responseKeys.join(', ')}]. Has contextualInsights: ${hasContextualInsights}, analysis: ${hasAnalysis}, recommendations: ${hasRecommendations}. Expected "tasks" array or single task object with title/description.`
};
}
convertAnalysisOnlyResponse(parsed, originalTask) {
logger.info({
taskId: originalTask.id,
analysisFields: Object.keys(parsed).filter(k => ['codebaseAlignment', 'researchIntegration', 'technologySpecifics', 'estimationFactors'].includes(k))
}, 'Converting analysis-only response to atomic task - LLM failed to provide proper decomposition');
const contextualInsights = {
codebaseAlignment: parsed.codebaseAlignment || 'No codebase alignment analysis provided',
researchIntegration: parsed.researchIntegration || 'No research integration analysis provided',
technologySpecifics: parsed.technologySpecifics || 'No technology specifics provided',
estimationFactors: parsed.estimationFactors || 'No estimation factors provided'
};
const atomicTask = {
id: `${originalTask.id}-atomic-01`,
title: originalTask.title,
description: `${originalTask.description}\n\nAnalysis: ${contextualInsights.codebaseAlignment}`,
type: originalTask.type,
functionalArea: originalTask.functionalArea || 'data-management',
priority: originalTask.priority,
status: 'pending',