@measey/mycoder-agent

Agent module for mycoder - an AI-powered software development assistant

compactHistory.test.js (122 lines, 4.59 kB)
/**
 * Tests for the compactHistory tool
 */
import { describe, expect, it, vi } from 'vitest';

import { TokenTracker } from '../../../core/tokens.js';
import { compactHistory } from '../compactHistory.js';

// Mock the createProvider function
vi.mock('../../../core/llm/provider.js', () => ({
  createProvider: vi.fn().mockReturnValue({
    name: 'openai',
    provider: 'openai.chat',
    model: 'gpt-3.5-turbo',
    generateText: vi.fn(),
  }),
}));

// Mock the generateText function
vi.mock('../../../core/llm/core.js', () => ({
  generateText: vi.fn().mockResolvedValue({
    text: 'This is a summary of the conversation.',
    tokenUsage: {
      input: 100,
      output: 50,
      cacheReads: 0,
      cacheWrites: 0,
    },
  }),
}));

describe('compactHistory tool', () => {
  it('should return a message when there are not enough messages to compact', async () => {
    // Setup
    const messages = [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi there' },
    ];
    const context = {
      messages,
      provider: 'openai',
      model: 'gpt-3.5-turbo',
      baseUrl: 'https://api.openai.com/v1',
      apiKey: 'sk-test',
      tokenTracker: new TokenTracker('test'),
      logger: {
        info: vi.fn(),
        debug: vi.fn(),
        error: vi.fn(),
      },
    };

    // Execute
    const result = await compactHistory({ preserveRecentMessages: 10 }, context);

    // Verify
    expect(result).toContain('Not enough messages');
    expect(messages.length).toBe(2); // Messages should remain unchanged
  });

  it('should compact messages and preserve recent ones', async () => {
    // Setup
    const messages = [
      { role: 'user', content: 'Message 1' },
      { role: 'assistant', content: 'Response 1' },
      { role: 'user', content: 'Message 2' },
      { role: 'assistant', content: 'Response 2' },
      { role: 'user', content: 'Message 3' },
      { role: 'assistant', content: 'Response 3' },
      { role: 'user', content: 'Recent message 1' },
      { role: 'assistant', content: 'Recent response 1' },
    ];
    const context = {
      messages,
      provider: 'openai',
      model: 'gpt-3.5-turbo',
      baseUrl: 'https://api.openai.com/v1',
      apiKey: 'sk-test',
      tokenTracker: new TokenTracker('test'),
      logger: {
        info: vi.fn(),
        debug: vi.fn(),
        error: vi.fn(),
      },
    };

    // Execute
    const result = await compactHistory({ preserveRecentMessages: 2 }, context);

    // Verify
    expect(result).toContain('Successfully compacted');
    expect(messages.length).toBe(3); // 1 summary + 2 preserved messages
    expect(messages[0]?.role).toBe('system'); // First message should be the summary
    expect(messages[0]?.content).toContain('COMPACTED MESSAGE HISTORY');
    expect(messages[1]?.content).toBe('Recent message 1'); // Preserved message
    expect(messages[2]?.content).toBe('Recent response 1'); // Preserved message
  });

  it('should use custom prompt when provided', async () => {
    // Setup
    const messages = Array.from({ length: 20 }, (_, i) => ({
      role: i % 2 === 0 ? 'user' : 'assistant',
      content: `Message ${i + 1}`,
    }));
    const context = {
      messages,
      provider: 'openai',
      model: 'gpt-3.5-turbo',
      baseUrl: 'https://api.openai.com/v1',
      apiKey: 'sk-test',
      tokenTracker: new TokenTracker('test'),
      logger: {
        info: vi.fn(),
        debug: vi.fn(),
        error: vi.fn(),
      },
    };

    // Import the actual generateText to spy on it
    const { generateText } = await import('../../../core/llm/core.js');

    // Execute
    await compactHistory(
      {
        preserveRecentMessages: 5,
        customPrompt: 'Custom summarization prompt',
      },
      context,
    );

    // Verify
    expect(generateText).toHaveBeenCalled();
    // Since we're mocking the function, we can't actually check the content
    // of the messages passed to it. We'll just verify it was called.
    expect(true).toBe(true);
  });
});
//# sourceMappingURL=compactHistory.test.js.map
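Read together, the tests pin down the tool's contract without showing its implementation: compactHistory(params, context) returns a status string, refuses to compact when the history is no longer than preserveRecentMessages, and otherwise mutates context.messages in place so that a single system message containing "COMPACTED MESSAGE HISTORY" replaces the older turns while the most recent ones are kept, with an optional customPrompt steering the summarization call. The sketch below is a minimal reading aid for that contract only; compactHistorySketch and the injected summarize callback are hypothetical names, not the package's actual API, which lives in ../compactHistory.js and calls the LLM layer directly.

// Hypothetical sketch of the behavior the tests assert; not the real implementation.
// `summarize(prompt, olderMessages)` stands in for the package's generateText/provider plumbing.
export async function compactHistorySketch({ preserveRecentMessages, customPrompt }, context, summarize) {
  const { messages } = context;

  // Too little history to be worth compacting; leave messages untouched.
  if (messages.length <= preserveRecentMessages) {
    return 'Not enough messages to compact.';
  }

  const older = messages.slice(0, messages.length - preserveRecentMessages);
  const recent = messages.slice(messages.length - preserveRecentMessages);

  // Summarize the older turns, optionally with a caller-supplied prompt.
  const prompt = customPrompt ?? 'Summarize the conversation so far.';
  const summary = await summarize(prompt, older);

  // Rewrite the shared array in place: one system summary followed by the recent tail.
  messages.splice(
    0,
    messages.length,
    { role: 'system', content: `COMPACTED MESSAGE HISTORY:\n${summary}` },
    ...recent,
  );

  return `Successfully compacted ${older.length} messages into a summary.`;
}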