@andrebuzeli/advanced-memory-markdown-mcp
Version:
Advanced Memory Bank MCP v3.1.5 - Sistema avançado de gerenciamento de memória com isolamento de projetos por IDE, sincronização sob demanda, backup a cada 30min, apenas arquivos .md principais sincronizados, pasta reasoning temporária com limpeza automática
211 lines • 7.24 kB
JavaScript
/**
* Performance Optimizer - Advanced performance optimizations
* Implements async operations, streaming, and caching strategies
*/
import { VERSION } from '../version.js';
import { LRUCache } from 'lru-cache';
import { createReadStream, createWriteStream } from 'fs';
import { pipeline } from 'stream/promises';
import { Transform } from 'stream';
export class PerformanceOptimizer {
    // Version tag exposed for diagnostics (imported from ../version.js).
    version = VERSION;
    // LRU cache backing lazyLoad(); bounded by entry count, TTL and byte size.
    cache;
    // Pending items per batch key, waiting to be flushed.
    batchQueue = new Map();
    // Active inactivity-flush timers per batch key.
    batchTimeouts = new Map();
    // Promises of operations currently in flight (used for metrics only).
    concurrentOperations = new Set();
    // Rolling performance metrics; snapshot via getMetrics().
    metrics = {
        operationTime: 0,
        memoryUsage: 0,
        cacheHitRate: 0,
        concurrentOperations: 0,
    };
    constructor() {
        this.cache = new LRUCache({
            max: 1000, // maximum number of entries
            ttl: 1000 * 60 * 30, // 30 minutes TTL
            allowStale: true, // serve stale entries rather than blocking
            updateAgeOnGet: true,
            updateAgeOnHas: true,
            maxSize: 50 * 1024 * 1024, // 50MB total, as measured by sizeCalculation
            sizeCalculation: (value) => JSON.stringify(value).length,
            dispose: (value, key) => {
                // Give evicted entries a chance to release their resources.
                if (value && typeof value.cleanup === 'function') {
                    value.cleanup();
                }
            },
        });
    }
    /**
     * Execute async operation factories in parallel with a concurrency cap.
     * Results are returned in input order; falsy entries in `operations`
     * leave `undefined` at their index. A rejection from any operation
     * propagates after all started operations have been awaited.
     *
     * FIX: the previous implementation removed an *arbitrary* non-current
     * promise from the in-flight list after Promise.race(), which (a) could
     * evict a still-pending operation and silently exceed `maxConcurrency`,
     * and (b) let the final Promise.all miss evicted-but-pending operations,
     * returning an incomplete results array. Each promise now removes itself
     * from the in-flight set only when it settles, and completion is awaited
     * on a separate, complete list.
     *
     * @param {Array<() => Promise<any>>} operations - promise factories
     * @param {number} [maxConcurrency=5] - max operations in flight at once
     * @returns {Promise<any[]>} results aligned with `operations` indices
     */
    async executeParallel(operations, maxConcurrency = 5) {
        const startTime = Date.now();
        const results = new Array(operations.length);
        const all = [];
        const executing = new Set();
        for (let i = 0; i < operations.length; i++) {
            const operation = operations[i];
            if (!operation)
                continue; // hole in the array: leave results[i] undefined
            const run = async (index) => {
                const operationPromise = operation();
                this.concurrentOperations.add(operationPromise);
                try {
                    results[index] = await operationPromise;
                }
                finally {
                    this.concurrentOperations.delete(operationPromise);
                }
            };
            const tracked = run(i);
            all.push(tracked);
            // `gate` never rejects (both branches just deregister), so the
            // Promise.race below cannot throw mid-loop; errors surface
            // deterministically via Promise.all(all) at the end.
            const gate = tracked.then(() => executing.delete(gate), () => executing.delete(gate));
            executing.add(gate);
            if (executing.size >= maxConcurrency) {
                await Promise.race(executing);
            }
        }
        await Promise.all(all);
        this.metrics.operationTime = Date.now() - startTime;
        // Measured after completion, so this is 0 unless calls overlap.
        this.metrics.concurrentOperations = this.concurrentOperations.size;
        return results;
    }
    /**
     * Queue an item under `key`, flushing the accumulated batch either when
     * it reaches `batchSize` (eager flush) or after `flushTimeout` ms of
     * inactivity for that key.
     *
     * @param {string} key - batch grouping key
     * @param {*} operation - item to enqueue
     * @param {(batch: any[]) => Promise<void>} batchProcessor - flush handler
     * @param {number} [batchSize=10] - eager-flush threshold
     * @param {number} [flushTimeout=1000] - inactivity flush delay (ms)
     */
    async batchOperation(key, operation, batchProcessor, batchSize = 10, flushTimeout = 1000) {
        if (!this.batchQueue.has(key)) {
            this.batchQueue.set(key, []);
        }
        const batch = this.batchQueue.get(key);
        batch.push(operation);
        // Restart the inactivity timer for this key.
        const existingTimeout = this.batchTimeouts.get(key);
        if (existingTimeout) {
            clearTimeout(existingTimeout);
        }
        const timeout = setTimeout(async () => {
            this.batchTimeouts.delete(key); // timer fired; drop stale handle
            await this.flushBatch(key, batchProcessor);
        }, flushTimeout);
        this.batchTimeouts.set(key, timeout);
        // Eager flush once the batch is full.
        if (batch.length >= batchSize) {
            clearTimeout(timeout);
            this.batchTimeouts.delete(key);
            await this.flushBatch(key, batchProcessor);
        }
    }
    /**
     * Stream a file through a chunk-wise string transformer without loading
     * it fully into memory. NOTE(review): the transformer receives raw read
     * chunks decoded with toString(), so multi-byte UTF-8 sequences split
     * across chunk boundaries may be mangled — confirm inputs are ASCII-safe
     * or transformers are boundary-tolerant.
     *
     * @param {string} inputPath - source file path
     * @param {string} outputPath - destination file path
     * @param {(chunk: string) => string} transformer - per-chunk transform
     * @returns {Promise<void>} resolves when the pipeline completes
     */
    async streamFileOperation(inputPath, outputPath, transformer) {
        const transformStream = new Transform({
            objectMode: false,
            transform(chunk, encoding, callback) {
                try {
                    const transformedChunk = transformer(chunk.toString());
                    callback(null, transformedChunk);
                }
                catch (error) {
                    // Normalize non-Error throwables so the pipeline rejects cleanly.
                    callback(error instanceof Error ? error : new Error(String(error)));
                }
            },
        });
        await pipeline(createReadStream(inputPath), transformStream, createWriteStream(outputPath));
    }
    /**
     * Load a value on demand, serving it from the LRU cache when present.
     * NOTE(review): with allowStale enabled, cache.has() reports false for
     * stale entries, so a stale hit is counted as a miss and reloaded here.
     *
     * @param {string} key - cache key
     * @param {() => Promise<any>} loader - producer invoked on cache miss
     * @param {boolean} [useCache=true] - bypass the cache entirely when false
     * @returns {Promise<any>} cached or freshly loaded value
     */
    async lazyLoad(key, loader, useCache = true) {
        if (useCache && this.cache.has(key)) {
            this.updateCacheHitRate(true);
            return this.cache.get(key);
        }
        this.updateCacheHitRate(false);
        const result = await loader();
        if (useCache) {
            this.cache.set(key, result);
        }
        return result;
    }
    /**
     * Process a large array in fixed-size chunks, running each chunk through
     * executeParallel and yielding to the event loop between chunks so other
     * work (and GC) can run.
     *
     * @param {any[]} data - items to process
     * @param {(item: any) => Promise<any>} processor - per-item async worker
     * @param {number} [chunkSize=100] - items per chunk
     * @returns {Promise<any[]>} results in input order
     */
    async processLargeDataset(data, processor, chunkSize = 100) {
        const results = [];
        for (let i = 0; i < data.length; i += chunkSize) {
            const chunk = data.slice(i, i + chunkSize);
            const chunkResults = await this.executeParallel(chunk.map(item => () => processor(item)), Math.min(5, chunkSize));
            results.push(...chunkResults);
            // Yield to the event loop between chunks.
            await new Promise(resolve => setImmediate(resolve));
        }
        return results;
    }
    /**
     * Debounce an async function: only the last call within `delay` ms
     * actually invokes `func`.
     *
     * FIX: the previous implementation kept only the latest caller's
     * resolve/reject, so every superseded caller's promise was leaked and
     * awaited forever. All pending callers are now settled with the result
     * (or error) of the invocation that finally runs.
     *
     * @param {(...args: any[]) => Promise<any>} func - function to debounce
     * @param {number} delay - quiet period in milliseconds
     * @returns {(...args: any[]) => Promise<any>} debounced wrapper
     */
    debounce(func, delay) {
        let timeoutId = null;
        let pendingWaiters = [];
        return (...args) => {
            return new Promise((resolve, reject) => {
                if (timeoutId) {
                    clearTimeout(timeoutId);
                }
                pendingWaiters.push({ resolve, reject });
                timeoutId = setTimeout(async () => {
                    // Snapshot and reset so callers arriving during the await
                    // start a fresh debounce window.
                    const waiters = pendingWaiters;
                    pendingWaiters = [];
                    timeoutId = null;
                    try {
                        const result = await func(...args);
                        for (const waiter of waiters) {
                            waiter.resolve(result);
                        }
                    }
                    catch (error) {
                        for (const waiter of waiters) {
                            waiter.reject(error);
                        }
                    }
                }, delay);
            });
        };
    }
    /**
     * Snapshot current performance metrics (heap usage refreshed on read).
     * @returns {{operationTime: number, memoryUsage: number, cacheHitRate: number, concurrentOperations: number}}
     */
    getMetrics() {
        this.metrics.memoryUsage = process.memoryUsage().heapUsed;
        return { ...this.metrics };
    }
    /**
     * Clear the LRU cache, drop all queued batches (without processing them),
     * and cancel every pending flush timer.
     */
    clearCache() {
        this.cache.clear();
        this.batchQueue.clear();
        for (const timeout of this.batchTimeouts.values()) {
            clearTimeout(timeout);
        }
        this.batchTimeouts.clear();
    }
    /**
     * Flush the batch accumulated under `key` through `batchProcessor`.
     * The queue slot is emptied before processing so items enqueued during
     * the (async) flush land in the next batch.
     *
     * @param {string} key - batch grouping key
     * @param {(batch: any[]) => Promise<void>} batchProcessor - flush handler
     */
    async flushBatch(key, batchProcessor) {
        const batch = this.batchQueue.get(key);
        if (batch && batch.length > 0) {
            this.batchQueue.set(key, []);
            await batchProcessor(batch);
        }
    }
    /**
     * Update the rolling cache-hit-rate metric: a hit averages the rate
     * toward 1, a miss decays it by 10%. (Ad-hoc smoothing, not a true
     * hit/total ratio.)
     *
     * @param {boolean} hit - whether the last lookup was a cache hit
     */
    updateCacheHitRate(hit) {
        const currentRate = this.metrics.cacheHitRate;
        this.metrics.cacheHitRate = hit ?
            (currentRate + 1) / 2 :
            currentRate * 0.9;
    }
}
//# sourceMappingURL=performance-optimizer.js.map