dt-common-device
A secure and robust device management library for IoT applications
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueUtils = void 0;
const rateLimit_utils_1 = require("./rateLimit.utils");
const axios_1 = __importDefault(require("axios"));
const dt_audit_library_1 = require("dt-audit-library");
const config_1 = require("../../config/config");
const redis_1 = require("../../db/redis");
class QueueUtils {
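// Builds the BullMQ queue name for a connection: `${microservice}_${provider}_${connectionId}`.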
static getQueueKey(microservice, connectionId, provider) {
return `${microservice}_${provider}_${connectionId}`;
}
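// Builds the key used to group requests per connection and provider: `${connectionId}_${provider}`.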
static getRequestQueueKey(connectionId, provider) {
return `${connectionId}_${provider}`;
}
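// Returns the cached BullMQ Queue for this key, creating and caching one (backed by the shared Redis client) if needed.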
static getOrCreateQueue(queueKey, queues) {
return (queues.get(queueKey) ??
queues
.set(queueKey, new (require("bullmq").Queue)(queueKey, {
connection: (0, redis_1.getRedisClient)(),
}))
.get(queueKey));
}
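// Ensures a single BullMQ Worker exists for the queue key: reuses a running worker, recreates one
// that has stopped, and wires up logging plus the jobResults map that callers read results from.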
static getOrCreateWorker(queueKey, workers, processFunction, jobResults) {
if (workers.has(queueKey)) {
const existingWorker = workers.get(queueKey);
console.log(`[${new Date().toISOString()}] [getOrCreateWorker] Worker already exists for queue: ${queueKey}, checking if it's running...`);
// Check if worker is still connected/running
// If worker is closed or errored, recreate it
if (existingWorker && typeof existingWorker.isRunning === "function") {
const isRunning = existingWorker.isRunning();
console.log(`[${new Date().toISOString()}] [getOrCreateWorker] Existing worker running status: ${isRunning}`);
if (!isRunning) {
console.log(`[${new Date().toISOString()}] [getOrCreateWorker] Worker not running, closing and recreating...`);
try {
// Worker.close() returns a promise; swallow errors from both the call and the returned promise
existingWorker.close().catch(() => { });
}
catch (e) {
// Ignore synchronous errors when closing
}
workers.delete(queueKey);
// Continue to create new worker below
}
else {
return; // Worker is running, keep using it
}
}
else {
// Worker entry exists but its running status cannot be verified; keep using it
console.log(`[${new Date().toISOString()}] [getOrCreateWorker] Existing worker found but cannot verify status, keeping it`);
return;
}
}
console.log(`[${new Date().toISOString()}] [getOrCreateWorker] Creating new worker for queue: ${queueKey}`);
const { Worker } = require("bullmq");
// Wrap the process function to catch any errors during execution
const wrappedProcessFunction = async (job) => {
const wrapperStartTime = Date.now();
console.log(`[${new Date().toISOString()}] [Worker Wrapper] Worker picked up job - JobId: ${job?.id}, QueueKey: ${queueKey}, JobName: ${job?.name}, Delay: ${job?.opts?.delay || 0}ms`);
try {
const result = await processFunction(job);
const processingTime = Date.now() - wrapperStartTime;
console.log(`[${new Date().toISOString()}] [Worker Wrapper] Job processed successfully - JobId: ${job?.id}, ProcessingTime: ${processingTime}ms, ResultType: ${typeof result}, HasResult: ${result !== undefined && result !== null}`);
return result;
}
catch (error) {
const processingTime = Date.now() - wrapperStartTime;
console.log(`[${new Date().toISOString()}] [Worker Wrapper] Error processing job - JobId: ${job?.id}, Error: ${error.message}, Stack: ${error.stack}, ProcessingTime: ${processingTime}ms`);
throw error;
}
};
const worker = new Worker(queueKey, wrappedProcessFunction, {
connection: (0, redis_1.getRedisClient)(),
concurrency: 1,
removeOnComplete: { count: 100, age: 3600 }, // Keep completed jobs for debugging
removeOnFail: { count: 100, age: 3600 },
lockDuration: 300000,
stalledInterval: 30000, // Check for stalled jobs more frequently
});
console.log(`[${new Date().toISOString()}] [getOrCreateWorker] Worker instance created for queue: ${queueKey}`);
// Event handlers for job tracking
worker.on("active", (job) => {
console.log(`[${new Date().toISOString()}] [Worker Event] Job active - JobId: ${job.id}, QueueKey: ${queueKey}, JobName: ${job.name}, Data: ${JSON.stringify(job.data || {})}`);
});
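// Note: in current BullMQ versions, "waiting" and "delayed" are QueueEvents events rather than
// Worker events, so the two handlers below are unlikely to ever fire.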
worker.on("waiting", (jobId) => {
console.log(`[${new Date().toISOString()}] [Worker Event] Job waiting - JobId: ${jobId}, QueueKey: ${queueKey}`);
});
worker.on("delayed", (job) => {
console.log(`[${new Date().toISOString()}] [Worker Event] Job delayed - JobId: ${job.id}, QueueKey: ${queueKey}, Delay: ${job.opts?.delay || 0}ms`);
});
worker.on("completed", (job) => {
console.log(`[${new Date().toISOString()}] [Worker Event] Job completed - JobId: ${job.id}, QueueKey: ${queueKey}, ReturnValueType: ${typeof job.returnvalue}, HasReturnValue: ${job.returnvalue !== undefined}`);
(0, config_1.getConfig)().LOGGER.info(`HTTP request completed: ${job.id} [${queueKey}]`);
const result = job.returnvalue;
const jobResult = {
result,
resolved: true,
timestamp: Date.now(),
};
console.log(`[${new Date().toISOString()}] [Worker Event] Storing job result - JobId: ${job.id}, ResultType: ${typeof result}, Resolved: true`);
jobResults.set(job.id, jobResult);
console.log(`[${new Date().toISOString()}] [Worker Event] Job result stored in jobResults - JobId: ${job.id}, StoredResult: ${JSON.stringify(jobResult)}`);
});
worker.on("failed", (job, err) => {
console.log(`[${new Date().toISOString()}] [Worker Event] Job failed - JobId: ${job?.id}, QueueKey: ${queueKey}, Error: ${err.message}`);
(0, config_1.getConfig)().LOGGER.error(`HTTP request failed: ${job?.id} [${queueKey}], Error: ${err.message}`);
// The failed event can fire with an undefined job (e.g. stalled jobs); guard before storing
if (job?.id) {
    jobResults.set(job.id, {
        error: err.message,
        resolved: true,
        timestamp: Date.now(),
    });
}
console.log(`[${new Date().toISOString()}] [Worker Event] Job error stored in jobResults - JobId: ${job?.id}`);
});
worker.on("error", (err) => {
console.log(`[${new Date().toISOString()}] [Worker Event] Worker error - QueueKey: ${queueKey}, Error: ${err.message}, Stack: ${err.stack}`);
(0, config_1.getConfig)().LOGGER.error(`Worker error for ${queueKey}: ${err.message}`);
});
worker.on("ready", () => {
console.log(`[${new Date().toISOString()}] [Worker Event] Worker ready - QueueKey: ${queueKey}`);
});
worker.on("closing", () => {
console.log(`[${new Date().toISOString()}] [Worker Event] Worker closing - QueueKey: ${queueKey}`);
});
worker.on("closed", () => {
console.log(`[${new Date().toISOString()}] [Worker Event] Worker closed - QueueKey: ${queueKey}`);
});
workers.set(queueKey, worker);
console.log(`[${new Date().toISOString()}] [getOrCreateWorker] Worker initialized and stored for queue: ${queueKey}`);
(0, config_1.getConfig)().LOGGER.info(`Worker initialized for queue: ${queueKey}`);
}
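// Polls the stored request timestamps for the provider's rate-limit key and sleeps (at least 1s at a
// time) until the sliding window has capacity; returns immediately if no config exists for the provider.
// Assumes the timestamps are returned oldest-first.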
static async waitForRateLimitExpiry(connectionId, provider, rateLimitConfigs) {
const key = `rate_limit:${provider}:${connectionId}`;
const config = rateLimitConfigs.get(provider);
if (!config)
return;
console.log(`[${new Date().toISOString()}] [waitForRateLimitExpiry] Starting wait for ${provider} [${connectionId}]`);
while (true) {
const timestamps = await rateLimit_utils_1.RateLimitUtils.getRawRequestTimestamps(key);
const now = Date.now();
const windowStart = now - config.windowMs;
const recentRequests = timestamps.filter((t) => t > windowStart);
if (recentRequests.length < config.maxRequests) {
// Rate limit not exceeded, we can proceed
console.log(`[${new Date().toISOString()}] [waitForRateLimitExpiry] Rate limit cleared, proceeding`);
break;
}
// Calculate when the earliest request will expire
const earliestRequest = recentRequests[0];
const nextAvailableTime = earliestRequest + config.windowMs;
const delay = Math.max(nextAvailableTime - now, 1000); // At least 1 second
console.log(`[${new Date().toISOString()}] [waitForRateLimitExpiry] Rate limit exceeded, waiting ${delay}ms. Requests in window: ${recentRequests.length}/${config.maxRequests}`);
(0, config_1.getConfig)().LOGGER.info(`Rate limit exceeded for ${provider} [${connectionId}]. Waiting ${delay}ms until next allowed request. Current requests in window: ${recentRequests.length}/${config.maxRequests}`);
// Wait for the calculated delay
await new Promise((resolve) => setTimeout(resolve, delay));
}
}
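// Records the request against the rate limiter, performs the HTTP call via axios (60s timeout) and
// returns response.data; on failure it publishes an "http.request.error" audit event and rethrows.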
static async executeHttpRequest(url, method, options, connectionId, provider) {
console.log(`[${new Date().toISOString()}] [executeHttpRequest] Starting HTTP request - ${method} ${url} -> ${provider} [${connectionId}]`);
(0, config_1.getConfig)().LOGGER.info(`Executing: ${method} ${url} -> ${provider} [${connectionId}]`);
try {
// Record the request first
await rateLimit_utils_1.RateLimitUtils.recordRequest(connectionId, provider);
// Execute the HTTP request
const response = await (0, axios_1.default)({
method: method.toLowerCase(),
url: url,
headers: options.headers || {},
timeout: 60000,
...(options.body && { data: options.body }),
...(options.params && { params: options.params }),
});
console.log(`[${new Date().toISOString()}] [executeHttpRequest] HTTP request successful - ${method} ${url} for ${provider} [${connectionId}]`);
(0, config_1.getConfig)().LOGGER.info(`HTTP request successful: ${method} ${url} for ${provider} [${connectionId}]`);
// Return only the response data
return response.data;
}
catch (error) {
console.log(`[${new Date().toISOString()}] [executeHttpRequest] HTTP request failed - ${method} ${url} for ${provider} [${connectionId}], Error: ${error.message}`);
(0, config_1.getConfig)().LOGGER.error(`HTTP request failed: ${error.message}`);
await (0, dt_audit_library_1.publishAudit)({
eventType: "http.request.error",
properties: {
connectionId,
provider,
endpoint: url,
method,
timestamp: Date.now(),
reason: "execution_error",
errorMessage: error.message,
error: error,
},
});
// Throw the error instead of returning it
throw error;
}
}
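// Adds an "http-request" job to the (possibly newly created) queue with the given delay,
// logs the queue's waiting/delayed/active counts, and returns the job id.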
static async addJobToQueue(queueKey, jobData, delay, queues) {
console.log(`[${new Date().toISOString()}] [addJobToQueue] Adding job to BullMQ queue - QueueKey: ${queueKey}, Delay: ${delay}ms`);
const queue = this.getOrCreateQueue(queueKey, queues);
const job = await queue.add("http-request", jobData, {
delay,
attempts: 1,
removeOnComplete: { age: 300, count: 100 },
removeOnFail: { age: 300, count: 100 },
});
console.log(`[${new Date().toISOString()}] [addJobToQueue] Job added to BullMQ - JobId: ${job.id}, QueueKey: ${queueKey}, Delay: ${delay}ms`);
// Log queue state for debugging
try {
const [waiting, delayed, active] = await Promise.all([
queue.getWaitingCount(),
queue.getDelayedCount(),
queue.getActiveCount(),
]);
console.log(`[${new Date().toISOString()}] [addJobToQueue] Queue state - QueueKey: ${queueKey}, Waiting: ${waiting}, Delayed: ${delayed}, Active: ${active}`);
}
catch (e) {
// Ignore errors getting queue state
}
return job.id;
}
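// Polls the job's state every 500ms until it completes or fails, resolving with the return value or
// rejecting with the failure reason; gives up after jobDelay + windowMs + 60s (HTTP timeout buffer).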
static async waitForJobCompletion(jobId, queueKey, jobDelay = 0, windowMs = 60000) {
// Simple: delay (wait) + windowMs (execute) + HTTP timeout buffer
const httpTimeout = 60000;
const totalTimeout = jobDelay + windowMs + httpTimeout;
console.log(`[${new Date().toISOString()}] [waitForJobCompletion] Waiting for job ${jobId} - delay: ${jobDelay}ms, timeout: ${totalTimeout}ms`);
const startTime = Date.now();
const { Queue } = require("bullmq");
const queue = new Queue(queueKey, {
connection: (0, redis_1.getRedisClient)(),
});
return new Promise((resolve, reject) => {
let timeoutHandle;
const checkInterval = setInterval(async () => {
try {
const job = await queue.getJob(jobId);
if (!job) {
const elapsed = Date.now() - startTime;
if (elapsed > totalTimeout) {
clearInterval(checkInterval);
clearTimeout(timeoutHandle);
await queue.close();
return reject(new Error(`Job ${jobId} not found and timeout exceeded`));
}
return; // Job not found yet, keep checking
}
const state = await job.getState();
if (state === "completed") {
console.log(`[${new Date().toISOString()}] [waitForJobCompletion] Job ${jobId} completed`);
clearInterval(checkInterval);
clearTimeout(timeoutHandle);
const result = job.returnvalue;
await queue.close();
return resolve(result);
}
if (state === "failed") {
console.log(`[${new Date().toISOString()}] [waitForJobCompletion] Job ${jobId} failed: ${job.failedReason}`);
clearInterval(checkInterval);
clearTimeout(timeoutHandle);
await queue.close();
return reject(new Error(job.failedReason || "Job failed"));
}
}
catch (error) {
clearInterval(checkInterval);
clearTimeout(timeoutHandle);
await queue.close();
reject(error);
}
}, 500); // Check every 500ms
// Timeout
timeoutHandle = setTimeout(() => {
clearInterval(checkInterval);
queue.close();
reject(new Error(`Request timeout: Maximum wait time exceeded (${totalTimeout}ms). Job delay was ${jobDelay}ms.`));
}, totalTimeout);
});
}
}
exports.QueueUtils = QueueUtils;
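// Example usage (illustrative sketch only: the maps, the "acme" provider, and the job-data shape
// below are assumptions of this comment, not defined by the module).
//
//   const queues = new Map();
//   const workers = new Map();
//   const jobResults = new Map();
//   const queueKey = QueueUtils.getQueueKey("device-service", "conn-123", "acme");
//   QueueUtils.getOrCreateWorker(queueKey, workers, async (job) => {
//     const { url, method, options, connectionId, provider } = job.data;
//     return QueueUtils.executeHttpRequest(url, method, options, connectionId, provider);
//   }, jobResults);
//   const jobId = await QueueUtils.addJobToQueue(queueKey, { url, method, options, connectionId, provider }, 0, queues);
//   const data = await QueueUtils.waitForJobCompletion(jobId, queueKey, 0);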