hikma-engine

Code Knowledge Graph Indexer - A sophisticated TypeScript-based indexer that transforms Git repositories into multi-dimensional knowledge stores for AI agents

"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.performanceOptimizer = exports.PerformanceOptimizer = void 0; const lru_cache_1 = require("lru-cache"); const api_config_1 = require("../config/api-config"); const logger_1 = require("../../utils/logger"); const logger = (0, logger_1.getLogger)('PerformanceOptimizer'); class PerformanceOptimizer { constructor() { this.caches = new Map(); this.connectionPools = new Map(); this.requestQueue = []; this.activeRequests = new Set(); this.responseTimeHistory = []; this.cacheStats = new Map(); this.initializeCaches(); this.initializeConnectionPools(); this.startPerformanceMonitoring(); } static getInstance() { if (!PerformanceOptimizer.instance) { PerformanceOptimizer.instance = new PerformanceOptimizer(); } return PerformanceOptimizer.instance; } initializeCaches() { const cacheConfig = api_config_1.apiConfig.getCacheConfig(); if (!cacheConfig.enabled) { return; } // Initialize different cache types with specific TTLs const cacheTypes = [ { name: 'semantic', ttl: cacheConfig.ttl.semantic }, { name: 'structural', ttl: cacheConfig.ttl.structural }, { name: 'git', ttl: cacheConfig.ttl.git }, { name: 'hybrid', ttl: cacheConfig.ttl.hybrid }, { name: 'comprehensive', ttl: cacheConfig.ttl.comprehensive }, { name: 'metadata', ttl: 3600 }, // 1 hour for metadata { name: 'health', ttl: 300 }, // 5 minutes for health data ]; cacheTypes.forEach(({ name, ttl }) => { const cache = new lru_cache_1.LRUCache({ max: cacheConfig.maxSize, ttl: ttl * 1000, // Convert to milliseconds updateAgeOnGet: true, allowStale: false, }); this.caches.set(name, cache); this.cacheStats.set(name, { hits: 0, misses: 0 }); }); logger.info('Performance caches initialized', { cacheTypes: cacheTypes.map(c => c.name), maxSize: cacheConfig.maxSize, }); } initializeConnectionPools() { // Initialize connection pools for different services // This would be implemented based on actual database clients // Example: Database connection pool this.connectionPools.set('database', { maxConnections: 20, activeConnections: 0, idleConnections: [], waitingQueue: [], }); // Example: HTTP client pool this.connectionPools.set('http', { maxConnections: 50, activeConnections: 0, keepAlive: true, timeout: 30000, }); logger.info('Connection pools initialized'); } startPerformanceMonitoring() { // Monitor performance metrics every 30 seconds setInterval(() => { this.collectPerformanceMetrics(); this.optimizeMemoryUsage(); this.cleanupExpiredData(); }, 30000); } // Cache Management async getFromCache(cacheType, key, fallbackFn, options) { const cache = this.caches.get(cacheType); if (!cache) { if (fallbackFn) { return await fallbackFn(); } return null; } const stats = this.cacheStats.get(cacheType); const cachedValue = cache.get(key); if (cachedValue !== undefined) { stats.hits++; return cachedValue; } stats.misses++; if (fallbackFn) { try { const value = await fallbackFn(); this.setCache(cacheType, key, value, options); return value; } catch (error) { logger.error('Cache fallback function failed', { cacheType, key, error }); return null; } } return null; } setCache(cacheType, key, value, options) { const cache = this.caches.get(cacheType); if (!cache) { return; } const setOptions = {}; if (options?.ttl) { setOptions.ttl = options.ttl * 1000; // Convert to milliseconds } cache.set(key, value, setOptions); } invalidateCache(cacheType, key) { const cache = this.caches.get(cacheType); if (!cache) { return; } if (key) { cache.delete(key); } else { cache.clear(); } } 
getCacheStats(cacheType) { if (cacheType) { const cache = this.caches.get(cacheType); const stats = this.cacheStats.get(cacheType); if (!cache || !stats) { return {}; } const total = stats.hits + stats.misses; return { size: cache.size, maxSize: cache.max, hits: stats.hits, misses: stats.misses, hitRate: total > 0 ? (stats.hits / total) * 100 : 0, }; } const allStats = {}; for (const [type, cache] of this.caches) { allStats[type] = this.getCacheStats(type); } return allStats; } // Request Queue Management async queueRequest(requestId, requestFn, priority = 0) { // Add request to queue this.requestQueue.push({ id: requestId, timestamp: new Date(), priority, }); // Sort queue by priority (higher priority first) this.requestQueue.sort((a, b) => b.priority - a.priority); // Wait for turn if queue is full while (this.activeRequests.size >= this.getMaxConcurrentRequests()) { await new Promise(resolve => setTimeout(resolve, 10)); } // Execute request this.activeRequests.add(requestId); const startTime = Date.now(); try { const result = await requestFn(); const responseTime = Date.now() - startTime; this.recordResponseTime(responseTime); return result; } finally { this.activeRequests.delete(requestId); this.requestQueue = this.requestQueue.filter(req => req.id !== requestId); } } getMaxConcurrentRequests() { // Dynamic concurrency based on system resources const memoryUsage = process.memoryUsage(); const memoryUsagePercent = (memoryUsage.heapUsed / memoryUsage.heapTotal) * 100; if (memoryUsagePercent > 80) { return 5; // Reduce concurrency under high memory usage } else if (memoryUsagePercent > 60) { return 10; } else { return 20; // Normal concurrency } } recordResponseTime(responseTime) { this.responseTimeHistory.push(responseTime); // Keep only last 1000 response times if (this.responseTimeHistory.length > 1000) { this.responseTimeHistory = this.responseTimeHistory.slice(-1000); } } // Memory Optimization optimizeMemoryUsage() { const memoryUsage = process.memoryUsage(); const memoryUsagePercent = (memoryUsage.heapUsed / memoryUsage.heapTotal) * 100; if (memoryUsagePercent > 85) { logger.warn('High memory usage detected, triggering optimization', { memoryUsagePercent, heapUsed: Math.round(memoryUsage.heapUsed / 1024 / 1024), heapTotal: Math.round(memoryUsage.heapTotal / 1024 / 1024), }); // Reduce cache sizes for (const [type, cache] of this.caches) { const currentSize = cache.size; const targetSize = Math.floor(currentSize * 0.7); // Reduce by 30% if (currentSize > targetSize) { // Clear some entries to reduce memory usage const entriesToRemove = currentSize - targetSize; const keys = Array.from(cache.keys()); for (let i = 0; i < entriesToRemove && keys.length > 0; i++) { cache.delete(keys[i]); } logger.info(`Reduced cache size for ${type}`, { from: currentSize, to: cache.size, }); } } // Force garbage collection if available if (global.gc) { global.gc(); } } } cleanupExpiredData() { // Clean up old response time history const cutoffTime = Date.now() - (24 * 60 * 60 * 1000); // 24 hours ago this.responseTimeHistory = this.responseTimeHistory.filter((_, index) => index >= this.responseTimeHistory.length - 100 // Keep last 100 ); // Clean up old queued requests (older than 5 minutes) const queueCutoff = new Date(Date.now() - 5 * 60 * 1000); this.requestQueue = this.requestQueue.filter(req => req.timestamp > queueCutoff); } // Performance Metrics getPerformanceMetrics() { const cacheStats = this.getCacheStats(); const totalHits = Object.values(cacheStats).reduce((sum, stats) => sum + (stats.hits 
|| 0), 0); const totalRequests = Object.values(cacheStats).reduce((sum, stats) => sum + (stats.hits || 0) + (stats.misses || 0), 0); const averageResponseTime = this.responseTimeHistory.length > 0 ? this.responseTimeHistory.reduce((sum, time) => sum + time, 0) / this.responseTimeHistory.length : 0; return { cacheHitRate: totalRequests > 0 ? (totalHits / totalRequests) * 100 : 0, averageResponseTime: Math.round(averageResponseTime), memoryUsage: process.memoryUsage(), activeConnections: this.activeRequests.size, queuedRequests: this.requestQueue.length, }; } collectPerformanceMetrics() { const metrics = this.getPerformanceMetrics(); // Log performance metrics periodically logger.debug('Performance metrics collected', { cacheHitRate: `${metrics.cacheHitRate.toFixed(2)}%`, averageResponseTime: `${metrics.averageResponseTime}ms`, memoryUsed: `${Math.round(metrics.memoryUsage.heapUsed / 1024 / 1024)}MB`, activeConnections: metrics.activeConnections, queuedRequests: metrics.queuedRequests, }); // Alert on performance issues if (metrics.averageResponseTime > 5000) { logger.warn('High average response time detected', { averageResponseTime: metrics.averageResponseTime, }); } if (metrics.cacheHitRate < 50 && this.responseTimeHistory.length > 100) { logger.warn('Low cache hit rate detected', { cacheHitRate: metrics.cacheHitRate, }); } } // Connection Pool Management async getConnection(poolName) { const pool = this.connectionPools.get(poolName); if (!pool) { throw new Error(`Connection pool '${poolName}' not found`); } // This would implement actual connection pooling logic // For now, return a mock connection return { id: `conn_${Date.now()}`, poolName }; } releaseConnection(poolName, connection) { const pool = this.connectionPools.get(poolName); if (!pool) { return; } // This would implement actual connection release logic logger.debug('Connection released', { poolName, connectionId: connection.id }); } // Batch Processing async batchProcess(items, processFn, batchSize = 10) { const results = []; for (let i = 0; i < items.length; i += batchSize) { const batch = items.slice(i, i + batchSize); const batchResults = await processFn(batch); results.push(...batchResults); // Small delay between batches to prevent overwhelming the system if (i + batchSize < items.length) { await new Promise(resolve => setTimeout(resolve, 10)); } } return results; } // Streaming for large datasets async *streamResults(dataSource, chunkSize = 100) { let chunk = []; for await (const item of dataSource()) { chunk.push(item); if (chunk.length >= chunkSize) { yield chunk; chunk = []; } } if (chunk.length > 0) { yield chunk; } } // Cleanup destroy() { // Clear all caches for (const cache of this.caches.values()) { cache.clear(); } // Clear connection pools this.connectionPools.clear(); // Clear queues this.requestQueue = []; this.activeRequests.clear(); this.responseTimeHistory = []; logger.info('Performance optimizer destroyed'); } } exports.PerformanceOptimizer = PerformanceOptimizer; exports.performanceOptimizer = PerformanceOptimizer.getInstance();
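
Below is a minimal usage sketch of the exported singleton, based only on the method signatures shown above. It assumes the module is loaded from inside the package so that the relative api-config and logger imports resolve; the require path, request id, cache key, and fallback/work functions are illustrative placeholders, not taken from the package documentation.

// Usage sketch (illustrative; paths and keys below are assumptions)
const { performanceOptimizer } = require('./performance-optimizer');

async function example() {
    // Read through the 'semantic' cache; on a miss the fallback runs and its result is cached.
    const result = await performanceOptimizer.getFromCache(
        'semantic',
        'query:example',               // hypothetical cache key
        async () => ({ answer: 42 }),  // placeholder for the real expensive lookup
        { ttl: 600 }                   // optional per-entry TTL in seconds
    );

    // Run work through the priority queue; concurrency adapts to current heap usage.
    const queued = await performanceOptimizer.queueRequest(
        'req-1',                       // hypothetical request id
        async () => result,
        5                              // higher number = higher priority
    );

    // Process a list in batches (default batch size 10) with a short pause between batches.
    const doubled = await performanceOptimizer.batchProcess(
        [1, 2, 3, 4, 5],
        async (batch) => batch.map(n => n * 2)
    );

    // Inspect cache and runtime metrics.
    console.log(performanceOptimizer.getCacheStats('semantic'));
    console.log(performanceOptimizer.getPerformanceMetrics());
    console.log(queued, doubled);
}

example().catch(console.error);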