@juspay/neurolink
Version:
Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications.
213 lines (212 loc) • 7.24 kB
JavaScript
/**
* Adaptive Semaphore Utility
*
* Provides a sophisticated semaphore implementation with dynamic concurrency adjustment
* for optimal resource utilization and performance tuning based on response times and error rates.
*/
import { logger } from "../../utils/logger.js";
/**
 * Adaptive semaphore that automatically adjusts concurrency based on performance metrics
 */
export class AdaptiveSemaphore {
    // Available permits. Invariant (when no waiters are queued):
    // count === currentConcurrency - activeRequests. May go negative
    // temporarily when the limit is lowered below the in-flight count;
    // subsequent releases pay the debt back before new permits open up.
    count;
    // FIFO queue of wake-up callbacks for pending acquire() calls.
    waiters = [];
    // Current (dynamic) concurrency limit, clamped to [minConcurrency, maxConcurrency].
    currentConcurrency;
    // Number of permits currently held by callers.
    activeRequests = 0;
    // Successful completions since construction or the last resetMetrics().
    completedCount = 0;
    // Errors since construction or the last resetMetrics().
    errorCount = 0;
    // Sliding window of the most recent response times (at most 10 samples).
    responseTimes = [];
    maxConcurrency;
    minConcurrency;
    /**
     * @param {{initialConcurrency: number, maxConcurrency: number, minConcurrency: number}} config
     */
    constructor(config) {
        this.currentConcurrency = config.initialConcurrency;
        this.count = config.initialConcurrency;
        this.maxConcurrency = config.maxConcurrency;
        this.minConcurrency = config.minConcurrency;
        logger.debug("AdaptiveSemaphore initialized", {
            initialConcurrency: config.initialConcurrency,
            maxConcurrency: config.maxConcurrency,
            minConcurrency: config.minConcurrency,
        });
    }
    /**
     * Acquire a semaphore permit, waiting if necessary
     * @returns {Promise<void>} resolves once a permit has been granted
     */
    async acquire() {
        return new Promise((resolve) => {
            if (this.count > 0) {
                this.count--;
                this.activeRequests++;
                resolve();
            }
            else {
                // NOTE: the waiter deliberately does NOT decrement `count`.
                // Whoever wakes it (the release() hand-off or the wake loop in
                // adjustConcurrency()) is responsible for permit accounting;
                // decrementing here as well leaks one permit per hand-off.
                this.waiters.push(() => {
                    this.activeRequests++;
                    resolve();
                });
            }
        });
    }
    /**
     * Release a semaphore permit and wake up waiting requests
     */
    release() {
        this.activeRequests--;
        if (this.waiters.length > 0) {
            // Hand the permit directly to the next waiter; `count` is left
            // untouched because the permit never returns to the free pool.
            const waiter = this.waiters.shift();
            waiter();
        }
        else {
            this.count++;
        }
    }
    /**
     * Record successful completion with response time for adaptive adjustment
     * @param {number} responseTimeMs - wall-clock duration of the completed request
     */
    recordSuccess(responseTimeMs) {
        this.completedCount++;
        this.responseTimes.push(responseTimeMs);
        // Keep only recent response times for calculation (last 10 responses)
        if (this.responseTimes.length > 10) {
            this.responseTimes.shift();
        }
        this.adjustConcurrencyBasedOnPerformance(responseTimeMs, false);
    }
    /**
     * Record error for adaptive adjustment
     * @param {number} [responseTimeMs] - optional duration of the failed request
     */
    recordError(responseTimeMs) {
        this.errorCount++;
        // `!= null` so a legitimate 0 ms sample is still recorded
        // (a plain truthiness check would silently drop it).
        if (responseTimeMs != null) {
            this.responseTimes.push(responseTimeMs);
            if (this.responseTimes.length > 10) {
                this.responseTimes.shift();
            }
        }
        this.adjustConcurrencyBasedOnPerformance(responseTimeMs ?? 0, true);
    }
    /**
     * Manually adjust concurrency level
     * @param {number} newLimit - desired limit; clamped to [minConcurrency, maxConcurrency]
     */
    adjustConcurrency(newLimit) {
        const clampedLimit = Math.max(this.minConcurrency, Math.min(this.maxConcurrency, newLimit));
        const previousConcurrency = this.currentConcurrency;
        // Shift the free-permit pool by the change in the limit. This preserves
        // the invariant count === limit - activeRequests; `count` may go
        // negative when shrinking below the number of in-flight requests.
        this.count += clampedLimit - previousConcurrency;
        this.currentConcurrency = clampedLimit;
        logger.debug("Concurrency adjusted", {
            newConcurrency: clampedLimit,
            previousConcurrency,
            availableCount: this.count,
            activeRequests: this.activeRequests,
        });
        // Wake up waiting requests if we increased concurrency. Each waiter
        // consumes one permit here; the waiter callback itself only bumps
        // activeRequests and resolves the pending acquire().
        while (this.count > 0 && this.waiters.length > 0) {
            const waiter = this.waiters.shift();
            this.count--;
            waiter();
        }
    }
    /**
     * Get current performance metrics
     * @returns {{activeRequests: number, currentConcurrency: number, completedCount: number,
     *            errorCount: number, averageResponseTime: number, waitingCount: number}}
     */
    getMetrics() {
        const averageResponseTime = this.responseTimes.length > 0
            ? this.responseTimes.reduce((sum, time) => sum + time, 0) /
                this.responseTimes.length
            : 0;
        return {
            activeRequests: this.activeRequests,
            currentConcurrency: this.currentConcurrency,
            completedCount: this.completedCount,
            errorCount: this.errorCount,
            averageResponseTime,
            waitingCount: this.waiters.length,
        };
    }
    /**
     * Reset metrics for new batch or session
     */
    resetMetrics() {
        this.completedCount = 0;
        this.errorCount = 0;
        this.responseTimes = [];
    }
    /**
     * Automatically adjust concurrency based on performance indicators
     * @param {number} responseTimeMs - duration of the request that just finished
     * @param {boolean} isError - whether that request failed
     */
    adjustConcurrencyBasedOnPerformance(responseTimeMs, isError) {
        const metrics = this.getMetrics();
        if (isError) {
            // On error, reduce concurrency to be more conservative
            if (this.currentConcurrency > this.minConcurrency) {
                this.adjustConcurrency(Math.max(this.minConcurrency, this.currentConcurrency - 1));
                logger.warn("Reduced concurrency due to error", {
                    newConcurrency: this.currentConcurrency,
                    errorCount: this.errorCount,
                });
            }
            return;
        }
        // Only adjust after we have some data to work with
        if (this.completedCount < 3) {
            return;
        }
        const fastResponseThreshold = 2000; // 2 seconds
        const slowResponseThreshold = 5000; // 5 seconds
        if (responseTimeMs < fastResponseThreshold &&
            metrics.averageResponseTime < fastResponseThreshold &&
            this.currentConcurrency < this.maxConcurrency) {
            // Fast responses and no bottleneck - increase concurrency
            this.adjustConcurrency(Math.min(this.maxConcurrency, this.currentConcurrency + 1));
            logger.debug("Increased concurrency due to fast responses", {
                newConcurrency: this.currentConcurrency,
                averageResponseTime: metrics.averageResponseTime,
            });
        }
        else if (responseTimeMs > slowResponseThreshold &&
            this.currentConcurrency > this.minConcurrency) {
            // Slow responses - decrease concurrency
            this.adjustConcurrency(Math.max(this.minConcurrency, this.currentConcurrency - 1));
            logger.debug("Decreased concurrency due to slow responses", {
                newConcurrency: this.currentConcurrency,
                responseTime: responseTimeMs,
            });
        }
    }
    /**
     * Check if semaphore is idle (no active or waiting requests)
     * @returns {boolean}
     */
    isIdle() {
        return this.activeRequests === 0 && this.waiters.length === 0;
    }
    /**
     * Get current concurrency limit
     * @returns {number}
     */
    getCurrentConcurrency() {
        return this.currentConcurrency;
    }
    /**
     * Get number of active requests
     * @returns {number}
     */
    getActiveRequestCount() {
        return this.activeRequests;
    }
    /**
     * Get number of waiting requests
     * @returns {number}
     */
    getWaitingRequestCount() {
        return this.waiters.length;
    }
}
/**
 * Build an AdaptiveSemaphore, filling in sensible defaults for the optional
 * concurrency bounds.
 *
 * @param {number} initialConcurrency - Starting number of permits.
 * @param {number} [maxConcurrency=10] - Upper bound for adaptive growth.
 * @param {number} [minConcurrency=1] - Lower bound for adaptive shrinking.
 * @returns {AdaptiveSemaphore} A newly constructed semaphore.
 */
export function createAdaptiveSemaphore(initialConcurrency, maxConcurrency = 10, minConcurrency = 1) {
    const config = {
        initialConcurrency,
        maxConcurrency,
        minConcurrency,
    };
    return new AdaptiveSemaphore(config);
}