/**
 * @gohcltech/bitbucket-mcp
 * Bitbucket integration for Claude via the Model Context Protocol (MCP).
 */
import Bottleneck from 'bottleneck';
import { getLogger } from './logger.js';
import { RateLimitError } from './types.js';
/**
 * Rate limiter implementation using the Bottleneck library.
 *
 * Provides intelligent rate limiting for Bitbucket API requests using a combination
 * of concurrent request limiting and token bucket algorithm. Prevents API quota
 * exhaustion while maintaining optimal throughput.
 *
 * Features:
 * - Concurrent request limiting
 * - Token bucket algorithm for burst capacity
 * - Automatic queue management
 * - Detailed statistics and monitoring
 * - Error handling and translation to RateLimitError
 * - Event-based logging integration
 *
 * @example
 * ```typescript
 * const rateLimiter = new RateLimiter({
 *   maxConcurrent: 10,
 *   minTime: 100,
 *   reservoir: 100,
 *   reservoirRefreshAmount: 50,
 *   reservoirRefreshInterval: 60000
 * });
 *
 * await rateLimiter.execute(
 *   () => apiClient.get('/repositories'),
 *   { operationName: 'list_repositories', toolName: 'repository_tools' }
 * );
 * ```
 */
export class RateLimiter {
    logger = getLogger();
    limiter;
    config;
    // Lifetime counters: `queued`/`running` mirror the limiter's live state,
    // `done`/`failed` accumulate until reset().
    stats = {
        queued: 0,
        running: 0,
        done: 0,
        failed: 0,
    };
    constructor(config) {
        this.config = config;
        this.limiter = new Bottleneck(this.buildBottleneckOptions(config));
        this.setupEventHandlers();
        this.logConfig();
    }
    /**
     * Map our config shape onto Bottleneck constructor options.
     *
     * Note: no retry-related options are set — Bottleneck retries are driven
     * by the return value of the 'failed' event handler, and this module
     * deliberately leaves retry policy to callers.
     */
    buildBottleneckOptions(config) {
        return {
            maxConcurrent: config.maxConcurrent,
            minTime: config.minTime,
            reservoir: config.reservoir,
            reservoirRefreshAmount: config.reservoirRefreshAmount,
            reservoirRefreshInterval: config.reservoirRefreshInterval,
        };
    }
    /**
     * Execute a function with rate limiting applied.
     *
     * Queues the operation according to rate limiting constraints and executes it
     * when resources are available. Provides detailed logging and error handling.
     *
     * @template T - Return type of the operation
     * @param operation - Async function to execute with rate limiting
     * @param context - Execution context for logging and prioritization
     * @param context.operationName - Human-readable operation name for logging
     * @param context.toolName - Optional tool name for categorized logging
     * @param context.priority - Optional Bottleneck priority (0-9, LOWER runs sooner; default 5)
     * @returns Promise resolving to the operation result
     * @throws {RateLimitError} When rate limits are exceeded
     *
     * @example
     * ```typescript
     * const result = await rateLimiter.execute(
     *   () => fetch('/api/repositories'),
     *   {
     *     operationName: 'fetch_repositories',
     *     toolName: 'repository_tools',
     *     priority: 7
     *   }
     * );
     * ```
     */
    async execute(operation, context) {
        const startTime = Date.now();
        // Set once the job leaves the queue and begins running. Prevents the
        // outer catch from double-decrementing `queued` when the operation
        // itself throws (the inner handler has already adjusted the counters).
        let started = false;
        try {
            this.stats.queued++;
            this.logger.debug(`Queuing operation: ${context.operationName}`, {
                operation: context.operationName,
                tool: context.toolName,
                priority: context.priority,
                queueStats: this.getQueueStats(),
            });
            // `??` (not `||`) so an explicit priority of 0 — Bottleneck's most
            // urgent priority — is honored rather than replaced by the default.
            const result = await this.limiter.schedule({ priority: context.priority ?? 5 }, async () => {
                started = true;
                this.stats.queued--;
                this.stats.running++;
                try {
                    const result = await operation();
                    this.stats.running--;
                    this.stats.done++;
                    const duration = Date.now() - startTime;
                    this.logger.debug(`Completed rate-limited operation: ${context.operationName}`, {
                        operation: context.operationName,
                        tool: context.toolName,
                        duration,
                        queueStats: this.getQueueStats(),
                    });
                    return result;
                }
                catch (error) {
                    this.stats.running--;
                    this.stats.failed++;
                    throw error;
                }
            });
            return result;
        }
        catch (error) {
            if (!started) {
                // The job never ran (e.g. dropped while waiting), so it is
                // still counted as queued — roll that back here. Jobs that
                // started have already been accounted for by the inner handler.
                this.stats.queued = Math.max(0, this.stats.queued - 1);
            }
            // Translate Bottleneck-originated drops into our domain error type.
            if (this.isBottleneckRateLimitError(error)) {
                const retryAfter = this.extractRetryAfter(error);
                this.logger.rateLimitHit(context.toolName || context.operationName, retryAfter);
                throw new RateLimitError(`Rate limit exceeded for ${context.operationName}`, retryAfter);
            }
            throw error;
        }
    }
    /**
     * Get current queue and processing statistics.
     * Queue depth and running count come live from Bottleneck; done/failed
     * are this instance's accumulated totals.
     */
    getStats() {
        const bottleneckCounts = this.limiter.counts();
        return {
            queued: bottleneckCounts.QUEUED,
            running: bottleneckCounts.RUNNING,
            done: this.stats.done,
            failed: this.stats.failed,
        };
    }
    /**
     * Get detailed queue information (raw Bottleneck counts object)
     */
    getQueueStats() {
        return this.limiter.counts();
    }
    /**
     * Check if the rate limiter is currently limiting requests
     * (jobs are waiting, or the concurrency ceiling is saturated).
     */
    isLimited() {
        const counts = this.limiter.counts();
        return counts.QUEUED > 0 || counts.RUNNING >= this.config.maxConcurrent;
    }
    /**
     * Get the current reservoir level (available tokens).
     * Returns 0 when no reservoir is configured (Bottleneck reports null).
     */
    async getReservoirLevel() {
        const level = await this.limiter.currentReservoir();
        return level !== null ? level : 0;
    }
    /**
     * Manually add tokens to the reservoir
     * @param amount - Number of tokens to add
     */
    async addTokens(amount) {
        await this.limiter.incrementReservoir(amount);
        this.logger.debug('Added tokens to rate limiter reservoir', {
            tokensAdded: amount,
            newReservoirLevel: await this.getReservoirLevel(),
        });
    }
    /**
     * Reset the rate limiter state.
     * Waits for in-flight jobs to finish, then replaces the underlying
     * Bottleneck instance and zeroes the local statistics.
     */
    async reset() {
        await this.limiter.stop({ dropWaitingJobs: false });
        this.limiter = new Bottleneck(this.buildBottleneckOptions(this.config));
        this.setupEventHandlers();
        this.stats = { queued: 0, running: 0, done: 0, failed: 0 };
        this.logger.info('Rate limiter reset', {
            config: this.config,
        });
    }
    /**
     * Gracefully stop the rate limiter, letting waiting jobs drain.
     * @throws Re-throws any error raised while stopping (after logging it).
     */
    async stop() {
        try {
            await this.limiter.stop({ dropWaitingJobs: false });
            this.logger.info('Rate limiter stopped gracefully', {
                finalStats: this.getStats(),
            });
        }
        catch (error) {
            this.logger.error('Error stopping rate limiter', error);
            throw error;
        }
    }
    /**
     * Update rate limiter configuration.
     * Only keys actually present in newConfig are forwarded to Bottleneck.
     * @param newConfig - Partial configuration; merged over the current config
     */
    updateConfig(newConfig) {
        this.config = { ...this.config, ...newConfig };
        const settings = {};
        for (const key of [
            'maxConcurrent',
            'minTime',
            'reservoir',
            'reservoirRefreshAmount',
            'reservoirRefreshInterval',
        ]) {
            if (newConfig[key] !== undefined) {
                settings[key] = newConfig[key];
            }
        }
        if (Object.keys(settings).length > 0) {
            this.limiter.updateSettings(settings);
        }
        this.logger.info('Rate limiter configuration updated', {
            config: this.config,
        });
    }
    /**
     * Wire Bottleneck lifecycle events into the logger for observability.
     */
    setupEventHandlers() {
        this.limiter.on('error', (error) => {
            this.logger.error('Rate limiter error', error, {
                operation: 'rate_limiter_error',
            });
        });
        this.limiter.on('failed', (error, jobInfo) => {
            this.logger.warn('Rate limited job failed', {
                operation: 'rate_limited_job_failed',
                error: error instanceof Error ? error.message : String(error),
                jobInfo,
                stats: this.getStats(),
            });
        });
        this.limiter.on('retry', (error, jobInfo) => {
            this.logger.debug('Rate limited job retrying', {
                operation: 'rate_limited_job_retry',
                error: error instanceof Error ? error.message : String(error),
                jobInfo,
            });
        });
        this.limiter.on('empty', () => {
            this.logger.debug('Rate limiter queue is empty', {
                operation: 'rate_limiter_empty',
                stats: this.getStats(),
            });
        });
        this.limiter.on('idle', () => {
            this.logger.debug('Rate limiter is idle', {
                operation: 'rate_limiter_idle',
                stats: this.getStats(),
            });
        });
    }
    /** Log the initial configuration once at construction time. */
    logConfig() {
        this.logger.info('Rate limiter initialized', {
            config: this.config,
            operation: 'rate_limiter_init',
        });
    }
    /**
     * Heuristically detect errors that originate from Bottleneck dropping or
     * rejecting a job (vs. errors thrown by the wrapped operation).
     */
    isBottleneckRateLimitError(error) {
        if (!error) {
            return false;
        }
        const message = typeof error.message === 'string' ? error.message : '';
        return (message.includes('This job has been dropped by Bottleneck') ||
            message.includes('rate limit') ||
            error.name === 'BottleneckError');
    }
    /**
     * Extract a retry-after hint (ms) from an error, falling back to the
     * configured pacing interval.
     */
    extractRetryAfter(error) {
        // `typeof` check (not truthiness) so an explicit retryAfter of 0 is kept.
        if (typeof error.retryAfter === 'number') {
            return error.retryAfter;
        }
        // `??` (not `||`) so a legitimately configured minTime of 0 is honored.
        return this.config.minTime ?? 1000;
    }
}
// Module-level singleton used for process-wide rate limiting.
let globalRateLimiter;
/**
 * Create (or replace) the shared RateLimiter singleton.
 *
 * @param config - Rate limiter configuration passed to the constructor.
 * @returns The newly created shared instance.
 */
export function createRateLimiter(config) {
    const instance = new RateLimiter(config);
    globalRateLimiter = instance;
    return instance;
}
/**
 * Retrieve the shared RateLimiter singleton.
 *
 * @returns The instance previously created by createRateLimiter().
 * @throws {Error} If createRateLimiter() has not been called yet.
 */
export function getRateLimiter() {
    if (globalRateLimiter === undefined) {
        throw new Error('Rate limiter not initialized. Call createRateLimiter() first.');
    }
    return globalRateLimiter;
}
/**
 * Decorator helper: wrap an async function so every invocation is routed
 * through the shared rate limiter. The limiter is looked up at call time,
 * so wrappers may be created before createRateLimiter() has run.
 *
 * @param target - Function to wrap.
 * @param context - Execution context forwarded to RateLimiter.execute().
 * @returns An async function with the same arguments and result as target.
 */
export function rateLimited(target, context) {
    return async function (...args) {
        return getRateLimiter().execute(() => target(...args), context);
    };
}
//# sourceMappingURL=rate-limiter.js.map