/*
 * lamplighter-mcp — an intelligent context engine for AI-assisted software development.
 * Unit tests for FeatureSpecProcessor (Jest).
 */
// The module under test touches the filesystem via fs/promises and delegates
// task generation to AIService; both side-effecting collaborators are mocked.
import * as fs from 'fs/promises';
import * as path from 'path';
import { FeatureSpecProcessor } from '../../src/modules/featureSpecProcessor';
import { AIService } from '../../src/services/aiService';
// Mock fs/promises so no real directories or files are touched during tests.
// (jest.mock calls are hoisted above the imports by the jest transform.)
jest.mock('fs/promises');
const mockedFs = jest.mocked(fs);
// Mock AIService so each test controls the generated task-list text.
jest.mock('../../src/services/aiService');
const mockedAIService = jest.mocked(AIService);
// Save original environment variables so afterAll can restore them unchanged.
const originalEnv = process.env;
describe('FeatureSpecProcessor', () => {
  const defaultContextDir = './lamplighter_context';
  const featureTasksDir = path.join(defaultContextDir, 'feature_tasks');
  let processor: FeatureSpecProcessor;

  beforeEach(() => {
    jest.clearAllMocks();
    // Point the processor at a known context directory for every test.
    process.env = {
      ...originalEnv,
      LAMPLIGHTER_CONTEXT_DIR: defaultContextDir
    };
    // Happy-path fs defaults; individual tests override these as needed.
    mockedFs.mkdir.mockResolvedValue(undefined);
    mockedFs.writeFile.mockResolvedValue(undefined);
    // Default AI response; suites below install their own fixtures.
    mockedAIService.generateText.mockResolvedValue('- [ ] Default task'); // Default mock response
    processor = new FeatureSpecProcessor();
    // Stub the private helpers as pass-throughs so processSpecification tests
    // can observe the call flow; suites that exercise the helpers themselves
    // call mockRestore() to get the real implementations back.
    jest.spyOn(processor as any, 'validateAndCleanTaskList').mockImplementation((md: unknown) => md); // Default pass-through
    jest.spyOn(processor as any, 'formatFinalMarkdown').mockImplementation((_spec: unknown, tasks: unknown, _id: unknown) => tasks); // Default pass-through
  });

  afterAll(() => {
    // Restore the real environment for any suites that run after this one.
    process.env = originalEnv;
  });

  it('should initialize and create directories', async () => {
    // The beforeEach above already constructed a processor, which would make
    // these assertions pass vacuously. Clear recorded calls first so we are
    // asserting on THIS construction. (clearAllMocks clears call records but
    // keeps the mockResolvedValue implementations installed above.)
    jest.clearAllMocks();
    new FeatureSpecProcessor(); // constructor side effects are what we assert on
    expect(mockedFs.mkdir).toHaveBeenCalledWith(defaultContextDir, { recursive: true });
    expect(mockedFs.mkdir).toHaveBeenCalledWith(featureTasksDir, { recursive: true });
  });

  describe('deriveFeatureId', () => {
    // NOTE: array-form it.each uses printf-style placeholders (%s), not the
    // $variable syntax, which only works with object tables.
    it.each([
      ['https://example.atlassian.net/wiki/spaces/SPACE/pages/123/Feature+One', 'feature_one'],
      ['https://example.com/some/path/FeatureTwo', 'featuretwo'],
      ['Feature Three with Spaces', 'feature_three_with_spaces'],
      ['feature-four-with-hyphens', 'feature_four_with_hyphens'],
      ['feature_5_with_underscores', 'feature_5_with_underscores'],
      ['Invalid URL Chars *&', 'invalid_url_chars'],
      ['http://hostonly.com', 'hostonly']
    ])('should derive ID for: %s', (input, expected) => {
      expect(processor.deriveFeatureId(input)).toEqual(expected);
    });

    it('should derive fallback ID for empty string', () => {
      expect(processor.deriveFeatureId('')).toMatch(/^feature_\d+$/);
    });
  });

  describe('validateAndCleanTaskList', () => {
    // Restore the real implementation: this suite tests the helper itself.
    beforeEach(() => {
      (processor as any).validateAndCleanTaskList.mockRestore();
    });

    it('should return cleaned valid markdown', () => {
      const raw = ' - [ ] Task 1 \n\n- [ ] Task 2\n - [ ] Task 3 ';
      const expected = '- [ ] Task 1\n- [ ] Task 2\n- [ ] Task 3';
      expect((processor as any).validateAndCleanTaskList(raw)).toBe(expected);
    });

    it('should throw error for invalid lines', () => {
      const raw = '- [ ] Task 1\nInvalid Line\n- [ ] Task 3';
      expect(() => (processor as any).validateAndCleanTaskList(raw)).toThrow('AI did not return a valid Markdown task checklist');
    });

    it('should throw error for lines with wrong checkbox marker', () => {
      const raw = '- [x] Task 1\n- [-] Task 2';
      expect(() => (processor as any).validateAndCleanTaskList(raw)).toThrow('AI did not return a valid Markdown task checklist');
    });

    it('should throw error for empty or null input', () => {
      expect(() => (processor as any).validateAndCleanTaskList(null)).toThrow('AI response was empty or invalid');
      expect(() => (processor as any).validateAndCleanTaskList('')).toThrow('AI response was empty or invalid');
      expect(() => (processor as any).validateAndCleanTaskList(' \n \n ')).toThrow('AI response did not contain any valid task list items');
    });
  });

  describe('processSpecification', () => {
    const specText = 'Feature specification text.';
    const codebaseSummary = 'Codebase summary.';
    const featureId = 'test-feature';
    const aiResponse = '- [ ] Task A\n- [ ] Task B';
    const expectedFilePath = path.join(featureTasksDir, `feature_${featureId}_tasks.md`);

    beforeEach(() => {
      // Restore the real helpers: this suite tests the full pipeline
      // (AI call -> validate -> format -> write).
      (processor as any).validateAndCleanTaskList.mockRestore();
      (processor as any).formatFinalMarkdown.mockRestore();
      // Well-formed AI response for this suite.
      mockedAIService.generateText.mockResolvedValue(aiResponse);
    });

    it('should process specification, call AI, validate, format, and write file', async () => {
      const resultPath = await processor.processSpecification(specText, codebaseSummary, featureId);
      // 1. Check AI call
      expect(mockedAIService.generateText).toHaveBeenCalledTimes(1);
      const prompt = mockedAIService.generateText.mock.calls[0][0];
      expect(prompt).toContain(specText);
      expect(prompt).toContain(codebaseSummary);
      expect(prompt).toContain('Format the tasks as a Markdown checklist');
      // 2. Check write file call
      expect(mockedFs.writeFile).toHaveBeenCalledTimes(1);
      expect(mockedFs.writeFile).toHaveBeenCalledWith(expectedFilePath, expect.any(String), 'utf-8');
      // 3. Check the content written (includes formatted parts)
      const writtenContent = mockedFs.writeFile.mock.calls[0][1];
      expect(writtenContent).toContain(`# ${specText.split('\n')[0]}`); // Title from spec
      expect(writtenContent).toContain('## Feature Specification Summary');
      expect(writtenContent).toContain('## Implementation Tasks');
      expect(writtenContent).toContain(aiResponse); // The validated task list
      expect(writtenContent).toContain('Lamplighter-MCP'); // Footer
      // 4. Check result path
      expect(resultPath).toBe(expectedFilePath);
    });

    it('should throw if AI generation fails', async () => {
      const aiError = new Error('AI failed');
      mockedAIService.generateText.mockRejectedValue(aiError);
      await expect(processor.processSpecification(specText, codebaseSummary, featureId))
        .rejects.toThrow(`Failed to process feature specification: ${aiError.message}`);
      expect(mockedFs.writeFile).not.toHaveBeenCalled();
    });

    it('should throw if validation fails', async () => {
      mockedAIService.generateText.mockResolvedValue('Invalid response'); // AI returns bad format
      await expect(processor.processSpecification(specText, codebaseSummary, featureId))
        .rejects.toThrow('AI did not return a valid Markdown task checklist');
      expect(mockedFs.writeFile).not.toHaveBeenCalled();
    });

    it('should throw if writing file fails', async () => {
      const writeError = new Error('Disk full');
      mockedFs.writeFile.mockRejectedValue(writeError);
      await expect(processor.processSpecification(specText, codebaseSummary, featureId))
        .rejects.toThrow(`Failed to process feature specification: ${writeError.message}`);
    });
  });
});