// yoda-mcp — Intelligent Planning MCP with Optional Dependencies and Graceful
// Fallbacks: wise planning through the Force of lean excellence.
// (Package listing metadata: 321 lines, 285 loc, 12.2 kB, text/typescript)
/**
* SmartEnhancementEngine Tests
*
* RED PHASE: Write failing tests for objective quality improvement
* These tests define what real enhancement means (not fake iterations)
*/
import { SmartEnhancementEngine } from '../src/smart/smart-enhancement-engine';
import { Plan, QualityGap, EnhancementResult, Improvement } from '../src/types';
describe('SmartEnhancementEngine', () => {
  let engine: SmartEnhancementEngine;

  beforeEach(() => {
    // A fresh engine per test keeps the suite order-independent.
    engine = new SmartEnhancementEngine({
      maxIterations: 3,
      improvementThreshold: 5, // Stop if improvement < 5 points
      validationStrict: true
    });
  });

  // RED: This test MUST fail - SmartEnhancementEngine doesn't exist yet
  describe('Objective Gap Analysis', () => {
    test('should identify missing acceptance criteria', () => {
      const planWithMissingCriteria: Plan = {
        requirements: [
          {
            id: 'req_1',
            description: 'User authentication',
            priority: 'must-have',
            acceptanceCriteria: [] // MISSING - should be detected
          },
          {
            id: 'req_2',
            description: 'Payment processing',
            priority: 'must-have',
            acceptanceCriteria: ['Process credit cards', 'Handle failures'] // Complete
          }
        ],
        tasks: [],
        timeline: { totalEstimate: '2 weeks', phases: [], criticalPath: [] },
        risks: [],
        metadata: { planId: 'test', generatedAt: new Date(), estimatedReadTime: '5 min', complexity: 'moderate' }
      };

      const analysis = engine.analyzeActualGaps(planWithMissingCriteria);

      expect(analysis.gaps).toHaveLength(1);
      // Optional chaining keeps the indexed access safe under
      // noUncheckedIndexedAccess; the matchers still fail loudly if the gap
      // is unexpectedly absent.
      expect(analysis.gaps[0]?.type).toBe('missing_acceptance_criteria');
      expect(analysis.gaps[0]?.severity).toBe('high');
      expect(analysis.gaps[0]?.affectedItems).toContain('req_1');
      expect(analysis.gaps[0]?.affectedItems).not.toContain('req_2');
      expect(analysis.score).toBeLessThan(100); // Quality should be reduced
    });

    test('should identify circular dependencies', () => {
      const planWithCircularDeps: Plan = {
        requirements: [],
        tasks: [
          {
            id: 'task_a',
            title: 'Task A',
            description: 'Depends on Task B',
            estimatedHours: 4,
            skills: [],
            dependencies: ['task_b'], // Circular: A -> B -> A
            deliverable: 'Component A'
          },
          {
            id: 'task_b',
            title: 'Task B',
            description: 'Depends on Task A',
            estimatedHours: 4,
            skills: [],
            dependencies: ['task_a'], // Circular: B -> A -> B
            deliverable: 'Component B'
          }
        ],
        timeline: { totalEstimate: '1 week', phases: [], criticalPath: [] },
        risks: [],
        metadata: { planId: 'circular', generatedAt: new Date(), estimatedReadTime: '3 min', complexity: 'simple' }
      };

      const analysis = engine.analyzeActualGaps(planWithCircularDeps);

      // Array.prototype.find returns undefined when no match exists, so assert
      // presence explicitly instead of dereferencing a possibly-undefined
      // value (a strictNullChecks compile error in the original).
      const circularGap = analysis.gaps.find(gap => gap.type === 'circular_dependencies');
      expect(circularGap).toBeDefined();
      expect(circularGap?.severity).toBe('critical');
      expect(circularGap?.affectedItems).toContain('task_a');
      expect(circularGap?.affectedItems).toContain('task_b');
      expect(analysis.score).toBeLessThan(70); // Major penalty for circular deps
    });

    test('should identify unmitigated high-risk tasks', () => {
      const planWithUnmitigatedRisks: Plan = {
        requirements: [],
        tasks: [
          {
            id: 'risky_task',
            title: 'Complex Integration',
            description: 'Integrate with legacy system',
            estimatedHours: 20,
            complexity: 'high', // High complexity but no corresponding risk
            skills: ['integration'],
            dependencies: [],
            deliverable: 'Integration layer'
          }
        ],
        timeline: { totalEstimate: '3 weeks', phases: [], criticalPath: [] },
        risks: [], // No risks identified for high-complexity task
        metadata: { planId: 'risky', generatedAt: new Date(), estimatedReadTime: '7 min', complexity: 'complex' }
      };

      const analysis = engine.analyzeActualGaps(planWithUnmitigatedRisks);

      // Same guard-then-optional-chain pattern as the circular-dependency test.
      const riskGap = analysis.gaps.find(gap => gap.type === 'unmitigated_risks');
      expect(riskGap).toBeDefined();
      expect(riskGap?.severity).toBe('medium');
      expect(riskGap?.affectedItems).toContain('risky_task');
    });

    test('should give high score to complete plans', () => {
      const completePlan: Plan = {
        requirements: [
          {
            id: 'req_complete',
            description: 'Complete requirement with criteria',
            priority: 'must-have',
            acceptanceCriteria: ['Criteria 1', 'Criteria 2', 'Criteria 3']
          }
        ],
        tasks: [
          {
            id: 'task_complete',
            title: 'Well-defined task',
            description: 'Task with proper estimates and no circular deps',
            estimatedHours: 8,
            complexity: 'medium',
            skills: ['development'],
            dependencies: [],
            deliverable: 'Working feature'
          }
        ],
        timeline: { totalEstimate: '1 week', phases: [], criticalPath: [] },
        risks: [
          {
            id: 'risk_1',
            description: 'Integration complexity may cause delays',
            likelihood: 'medium',
            impact: 'medium',
            mitigation: 'Start with simple integration, iterate'
          }
        ],
        metadata: { planId: 'complete', generatedAt: new Date(), estimatedReadTime: '5 min', complexity: 'moderate' }
      };

      const analysis = engine.analyzeActualGaps(completePlan);

      expect(analysis.gaps).toHaveLength(0);
      expect(analysis.score).toBeGreaterThanOrEqual(95); // Nearly perfect score
    });
  });

  // RED: This test MUST fail - improveUntilOptimal doesn't exist yet
  describe('Smart Iteration Control', () => {
    test('should stop iterating when no meaningful improvement possible', async () => {
      const nearlyPerfectPlan: Plan = {
        requirements: [
          {
            id: 'req_good',
            description: 'Well-defined requirement',
            priority: 'must-have',
            acceptanceCriteria: ['Clear criteria']
          }
        ],
        tasks: [
          {
            id: 'task_good',
            title: 'Good task',
            description: 'Properly estimated task',
            estimatedHours: 6,
            complexity: 'low',
            skills: ['coding'],
            dependencies: [],
            deliverable: 'Feature'
          }
        ],
        timeline: { totalEstimate: '1 week', phases: [], criticalPath: [] },
        risks: [],
        metadata: { planId: 'good', generatedAt: new Date(), estimatedReadTime: '4 min', complexity: 'simple' },
        qualityScore: 96 // Already high quality
      };

      const result: EnhancementResult = await engine.improveUntilOptimal(nearlyPerfectPlan, 3);

      // Should stop early since improvement would be < 5 points
      expect(result.iterations.length).toBeLessThanOrEqual(1); // Should run zero or one iteration
      expect(result.totalImprovement).toBeLessThan(5); // Minimal improvement
    });

    test('should continue iterating when meaningful improvement possible', async () => {
      // Shared constant so the final expectation compares against the known
      // starting score instead of re-reading a possibly-optional field.
      const initialQualityScore = 30; // Low quality - lots of room for improvement

      const problematicPlan: Plan = {
        requirements: [
          {
            id: 'req_bad',
            description: 'Vague requirement',
            priority: 'must-have',
            acceptanceCriteria: [] // Missing criteria
          }
        ],
        tasks: [
          {
            id: 'task_bad',
            title: 'Unclear task',
            description: 'Task without estimate',
            estimatedHours: 0, // Missing estimate
            skills: [],
            dependencies: [],
            deliverable: 'TBD' // Vague deliverable
          }
        ],
        timeline: { totalEstimate: 'unknown', phases: [], criticalPath: [] },
        risks: [], // No risk analysis for problematic plan
        metadata: { planId: 'bad', generatedAt: new Date(), estimatedReadTime: '2 min', complexity: 'simple' },
        qualityScore: initialQualityScore
      };

      const result: EnhancementResult = await engine.improveUntilOptimal(problematicPlan, 3);

      // Should run multiple iterations due to improvement potential
      expect(result.iterations.length).toBeGreaterThan(1);
      expect(result.totalImprovement).toBeGreaterThan(15); // Significant improvement
      expect(result.finalPlan.qualityScore).toBeGreaterThan(initialQualityScore);
    });
  });

  // RED: This test MUST fail - applyTargetedFixes doesn't exist yet
  describe('Targeted Fix Application', () => {
    test('should add specific acceptance criteria to requirements missing them', () => {
      const gaps: QualityGap[] = [
        {
          type: 'missing_acceptance_criteria',
          severity: 'high',
          affectedItems: ['req_1'],
          fixAction: 'Add specific, measurable acceptance criteria'
        }
      ];

      const fixes = engine.identifyTargetedFixes(gaps);

      expect(fixes).toHaveLength(1);
      expect(fixes[0]?.type).toBe('add_acceptance_criteria');
      expect(fixes[0]?.targetItems).toContain('req_1');
      expect(fixes[0]?.expectedImpact).toBe('high');
      expect(typeof fixes[0]?.implementation).toBe('function');
    });

    test('should resolve circular dependencies', () => {
      const gaps: QualityGap[] = [
        {
          type: 'circular_dependencies',
          severity: 'critical',
          affectedItems: ['task_a', 'task_b'],
          fixAction: 'Resolve circular dependencies in task order'
        }
      ];

      const fixes = engine.identifyTargetedFixes(gaps);

      expect(fixes).toHaveLength(1);
      expect(fixes[0]?.type).toBe('fix_dependencies');
      expect(fixes[0]?.targetItems).toEqual(['task_a', 'task_b']);
      expect(fixes[0]?.expectedImpact).toBe('critical');
    });

    test('should add time estimates to tasks missing them', () => {
      const gaps: QualityGap[] = [
        {
          type: 'missing_estimates',
          severity: 'medium',
          affectedItems: ['task_no_estimate'],
          fixAction: 'Add realistic time estimates based on task complexity'
        }
      ];

      const fixes = engine.identifyTargetedFixes(gaps);

      expect(fixes).toHaveLength(1);
      expect(fixes[0]?.type).toBe('add_estimates');
      expect(fixes[0]?.targetItems).toContain('task_no_estimate');
      expect(fixes[0]?.expectedImpact).toBe('medium');
    });
  });

  // RED: This test MUST fail - calculateImprovementScore doesn't exist yet
  describe('Real Improvement Measurement', () => {
    test('should calculate improvement score based on actual fixes applied', () => {
      const beforeScore = 40;
      const afterScore = 75;
      // Annotating the array as Improvement[] gives excess-property checking
      // and literal-type validation without per-field 'as const' casts.
      const appliedFixes: Improvement[] = [
        { type: 'add_acceptance_criteria', targetItems: ['req_1'], implementation: () => {}, expectedImpact: 'high' },
        { type: 'add_estimates', targetItems: ['task_1'], implementation: () => {}, expectedImpact: 'medium' }
      ];

      const improvementScore = engine.calculateImprovementScore(beforeScore, afterScore, appliedFixes);

      expect(improvementScore).toBe(35); // afterScore - beforeScore
      expect(improvementScore).toBeGreaterThan(0);
    });

    // Renamed from "should return zero improvement": the assertion below
    // expects 1 (86 - 85), not 0, so the name now matches the behavior.
    test('should report only negligible improvement when no meaningful changes made', () => {
      const beforeScore = 85;
      const afterScore = 86; // Minimal change
      const appliedFixes: Improvement[] = []; // No fixes applied

      const improvementScore = engine.calculateImprovementScore(beforeScore, afterScore, appliedFixes);

      expect(improvementScore).toBe(1); // Very small improvement
    });
  });
});