// Package: @simonecoelhosfo/optimizely-mcp-server
// Optimizely MCP Server for AI assistants with integrated CLI tools.
// (Compiled JavaScript output; original listing: 82 lines, 4.48 kB.)
/**
* Simple cache test to verify basic functionality
*/
import { QueryNormalizer } from './cache/QueryNormalizer.js';
import { CacheKeyGenerator } from './cache/CacheKeyGenerator.js';
import { InMemoryCache } from './cache/InMemoryCache.js';
import { TTLStrategy } from './cache/TTLStrategy.js';
import { CacheOrchestrator } from './cache/CacheOrchestrator.js';
// Banner printed once before any test runs.
console.log('Testing Intelligent Query Cache Components...\n');
/**
 * Smoke-tests every cache component in sequence: QueryNormalizer,
 * CacheKeyGenerator, InMemoryCache, TTLStrategy and CacheOrchestrator.
 * Progress is logged to the console; on the first failure the error is
 * printed and the process exits with code 1.
 *
 * @returns {Promise<void>}
 */
async function testCacheComponents() {
    try {
        // Test 2 reuses the normalizer built by test 1.
        const normalizer = testQueryNormalizer();
        testCacheKeyGenerator(normalizer);
        await testInMemoryCache();
        testTTLStrategy();
        await testCacheOrchestrator();
        console.log('\nAll cache components are working correctly!');
    }
    catch (error) {
        console.error('Test failed:', error);
        process.exit(1);
    }
}

/**
 * Test 1: equivalent phrasings should normalize to the same entity/operation.
 *
 * @returns {QueryNormalizer} the normalizer, so test 2 can reuse it.
 */
function testQueryNormalizer() {
    console.log('1. Testing QueryNormalizer');
    const normalizer = new QueryNormalizer();
    const queries = [
        'show all flags',
        'list flags',
        'get flags',
        'how many experiments are running',
        'count experiments where status = running'
    ];
    queries.forEach(q => {
        const normalized = normalizer.normalize(q);
        console.log(` "${q}" -> entity: ${normalized.entity}, operation: ${normalized.operation}`);
    });
    return normalizer;
}

/**
 * Test 2: semantically equivalent queries must map to the same cache key.
 *
 * @param {QueryNormalizer} normalizer - shared normalizer from test 1.
 */
function testCacheKeyGenerator(normalizer) {
    console.log('\n2. Testing CacheKeyGenerator');
    const keyGen = new CacheKeyGenerator();
    const normalized1 = normalizer.normalize('show all flags');
    const key1 = keyGen.generateKey(normalized1, { projectId: '12345' });
    console.log(` Key for "show all flags": ${key1}`);
    const normalized2 = normalizer.normalize('list flags');
    const key2 = keyGen.generateKey(normalized2, { projectId: '12345' });
    console.log(` Key for "list flags": ${key2}`);
    console.log(` Keys are ${key1 === key2 ? 'SAME' : 'DIFFERENT'} (should be SAME)`);
}

/**
 * Test 3: basic set/get plus miss accounting. The cache is shut down in a
 * `finally` so a failing assertion cannot leak its internal timers.
 */
async function testInMemoryCache() {
    console.log('\n3. Testing InMemoryCache');
    const cache = new InMemoryCache({ maxSizeMB: 1, maxEntries: 100 });
    try {
        await cache.set('test-key-1', { data: 'test data 1' }, 60000);
        await cache.set('test-key-2', { data: 'test data 2' }, 60000);
        const result1 = await cache.get('test-key-1');
        console.log(` Cache hit for key1: ${result1 ? 'YES' : 'NO'}`);
        const result3 = await cache.get('test-key-3');
        console.log(` Cache hit for key3: ${result3 ? 'YES' : 'NO'} (should be NO)`);
        const stats = cache.getStats();
        console.log(` Cache stats: ${stats.entries} entries, ${stats.hits} hits, ${stats.misses} misses`);
    }
    finally {
        cache.shutdown();
    }
}

/**
 * Test 4: the TTL policy should give different lifetimes to different
 * entity/operation combinations (stable flag lists vs. volatile results).
 */
function testTTLStrategy() {
    console.log('\n4. Testing TTLStrategy');
    const ttlStrategy = new TTLStrategy();
    const ttl1 = ttlStrategy.calculateTTL({ entity: 'flag', operation: 'list', filters: {}, projections: [], joins: [], aggregations: [] });
    console.log(` TTL for flag list: ${ttl1 / 1000}s`);
    const ttl2 = ttlStrategy.calculateTTL({ entity: 'results', operation: 'analyze', filters: {}, projections: [], joins: [], aggregations: [] });
    console.log(` TTL for results analyze: ${ttl2 / 1000}s`);
}

/**
 * Test 5: the orchestrator should call the executor once per distinct query
 * and serve repeats from cache. Shut down in `finally` so a failure cannot
 * leave the orchestrator's timers keeping the process alive.
 */
async function testCacheOrchestrator() {
    console.log('\n5. Testing CacheOrchestrator');
    const orchestrator = new CacheOrchestrator({ enabled: true });
    try {
        let executionCount = 0;
        const mockExecutor = async (query) => {
            executionCount++;
            console.log(` Executing query (execution #${executionCount})`);
            return { data: [`result for ${JSON.stringify(query)}`], metadata: { rowCount: 1, executionTime: 50, cacheHit: false } };
        };
        // First call - should execute
        const result1a = await orchestrator.executeWithCache('show all flags', mockExecutor);
        console.log(` First call - cached: ${result1a.cached}, execution count: ${executionCount}`);
        // Second call - should be cached
        const result2a = await orchestrator.executeWithCache('show all flags', mockExecutor);
        console.log(` Second call - cached: ${result2a.cached}, execution count: ${executionCount}`);
        // Different query - should execute
        const result3a = await orchestrator.executeWithCache('show all experiments', mockExecutor);
        console.log(` Different query - cached: ${result3a.cached}, execution count: ${executionCount}`);
    }
    finally {
        orchestrator.shutdown();
    }
}
// Entry point. The suite handles its own failures internally, but guard the
// returned promise anyway so nothing can ever surface as an
// unhandled-rejection warning.
testCacheComponents().catch((error) => {
    console.error('Unexpected error:', error);
    process.exit(1);
});
//# sourceMappingURL=test-cache-simple.js.map