queuex-sdk
A TypeScript-based queue management SDK with Redis support
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.RedisStorage = void 0;
const ioredis_1 = __importDefault(require("ioredis"));
const job_1 = require("../models/job");
const queue_1 = require("../models/queue");
/**
* Redis-based storage for jobs and events using Streams for logs.
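*
* A minimal usage sketch (assumes RedisStorage and QueueStrategy are re-exported
* from the package root, which this file alone does not confirm):
*
* @example
* const { RedisStorage, QueueStrategy } = require('queuex-sdk');
* const storage = new RedisStorage('redis://localhost:6379');
* const job = await storage.getNextJob('emails', QueueStrategy.FIFO);
* await storage.disconnect();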
*/
class RedisStorage {
constructor(connectionString) {
try {
this.client = new ioredis_1.default(connectionString);
}
catch (err) {
throw new Error(`Failed to connect to Redis: ${err instanceof Error ? err.message : String(err)}`);
}
}
/**
* Enqueues a job using the specified queue strategy.
*
* Strategies:
* - FIFO: Uses RPUSH to add jobs to the end of the list
* - LIFO: Uses LPUSH to add jobs to the beginning of the list
* - PRIORITY: Uses ZADD with priority as score
* - ROUND_ROBIN: Uses RPUSH like FIFO; on dequeue, jobs are rotated back to the end of the list
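*
* A rough call sketch; the job fields shown are inferred from how this method
* reads them, not from a documented Job contract:
*
* @example
* await storage.enqueueJob(
*   { id: 'job-1', queue: 'emails', options: { priority: 'high' } },
*   QueueStrategy.PRIORITY
* );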
*/
async enqueueJob(job, strategy = queue_1.QueueStrategy.FIFO) {
const key = `queue:${job.queue}:jobs`;
try {
switch (strategy) {
case queue_1.QueueStrategy.FIFO:
await this.client.rpush(key, JSON.stringify(job));
break;
case queue_1.QueueStrategy.LIFO:
await this.client.lpush(key, JSON.stringify(job));
break;
case queue_1.QueueStrategy.PRIORITY:
const priority = this.getPriorityScore(job.options.priority);
await this.client.zadd(key, priority, JSON.stringify(job));
break;
case queue_1.QueueStrategy.ROUND_ROBIN:
await this.client.rpush(key, JSON.stringify(job));
break;
}
if (job.options.delay && job.scheduledAt && job.state === job_1.JobState.DELAYED) {
await this.scheduleJob(job);
}
}
catch (err) {
throw new Error(`Failed to enqueue job ${job.id}: ${err instanceof Error ? err.message : String(err)}`);
}
}
/**
* Gets the next job based on the queue strategy.
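*
* A usage sketch; it returns the parsed job, or null when the queue is empty
* (reuses the storage instance from the class-level example):
*
* @example
* const job = await storage.getNextJob('emails', QueueStrategy.PRIORITY);
* if (job) {
*   // process the job, then persist the outcome
*   await storage.saveJobResult(job);
* }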
*/
async getNextJob(queue, strategy = queue_1.QueueStrategy.FIFO) {
const key = `queue:${queue}:jobs`;
try {
let jobData = null;
switch (strategy) {
case queue_1.QueueStrategy.FIFO:
jobData = await this.client.lpop(key);
break;
case queue_1.QueueStrategy.LIFO:
// Jobs are LPUSHed on enqueue, so LPOP returns the most recently added (LIFO)
jobData = await this.client.lpop(key);
break;
case queue_1.QueueStrategy.PRIORITY:
// Use the reverse range so the highest score (highest priority) is dequeued first
const results = await this.client.zrevrange(key, 0, 0);
if (results.length > 0) {
jobData = results[0];
await this.client.zrem(key, jobData);
}
break;
case queue_1.QueueStrategy.ROUND_ROBIN:
jobData = await this.client.lpop(key);
if (jobData) {
// Move the job to the end of the queue for round-robin
await this.client.rpush(key, jobData);
}
break;
}
return jobData ? JSON.parse(jobData) : null;
}
catch (err) {
throw new Error(`Failed to get next job from ${queue}: ${err instanceof Error ? err.message : String(err)}`);
}
}
/**
* Converts priority level to a numeric score for Redis sorted sets.
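*
* The mapping is fixed; any value other than 'high', 'medium', or 'low'
* falls back to the medium score:
*
* @example
* // getPriorityScore('high')      -> 3
* // getPriorityScore('medium')    -> 2
* // getPriorityScore('low')       -> 1
* // getPriorityScore(undefined)   -> 2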
*/
getPriorityScore(priority) {
switch (priority) {
case 'high':
return 3;
case 'medium':
return 2;
case 'low':
return 1;
default:
return 2; // Default to medium priority
}
}
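/**
* Registers a delayed job: its id is scored by scheduledAt in the queue's
* scheduled sorted set, and its payload is stored for 24 hours under
* job:{id}:data. Because the score is later compared with zrangebyscore,
* scheduledAt is expected to be a numeric timestamp.
*/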
async scheduleJob(job) {
const scheduledKey = `queue:${job.queue}:scheduled`;
try {
await this.client.zadd(scheduledKey, job.scheduledAt.toString(), job.id);
await this.client.set(`job:${job.id}:data`, JSON.stringify(job), 'EX', 60 * 60 * 24);
}
catch (err) {
throw new Error(`Failed to schedule job ${job.id}: ${err instanceof Error ? err.message : String(err)}`);
}
}
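/**
* Persists the job (including whatever result it carries) under
* job:{id}:result with a 24-hour TTL.
*/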
async saveJobResult(job) {
const resultKey = `job:${job.id}:result`;
try {
await this.client.set(resultKey, JSON.stringify(job), 'EX', 60 * 60 * 24);
}
catch (err) {
throw new Error(`Failed to save job result ${job.id}: ${err instanceof Error ? err.message : String(err)}`);
}
}
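/**
* Appends an event entry (event, state, timestamp, message) to the job's
* Redis Stream and refreshes the stream's 24-hour TTL.
*/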
async logEvent(event, job) {
const streamKey = `job:${job.id}:logs`;
try {
// 'message' uses the latest log entry, falling back to the event name
await this.client.xadd(streamKey, '*', 'event', event, 'state', job.state, 'timestamp', Date.now().toString(), 'message', job.logs[job.logs.length - 1] || event);
await this.client.expire(streamKey, 60 * 60 * 24); // TTL: 24 hours
}
catch (err) {
throw new Error(`Failed to log event for job ${job.id}: ${err instanceof Error ? err.message : String(err)}`);
}
}
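/**
* Reads every logged event for a job from its Redis Stream and returns them
* as "event:timestamp:state:message" strings.
*/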
async getEvents(jobId) {
const streamKey = `job:${jobId}:logs`;
try {
const entries = await this.client.xrange(streamKey, '-', '+');
return entries.map((entry) => {
const [, fields] = entry;
const data = {};
for (let i = 0; i < fields.length; i += 2) {
data[fields[i]] = fields[i + 1];
}
return `${data.event}:${data.timestamp}:${data.state}:${data.message}`;
});
}
catch (err) {
throw new Error(`Failed to get events for job ${jobId}: ${err instanceof Error ? err.message : String(err)}`);
}
}
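/**
* Returns the serialized payloads of scheduled jobs whose score (scheduled time)
* is at or below maxScore; ids with no stored payload are skipped.
*
* A polling sketch (assumes scheduledAt scores are millisecond timestamps,
* which this file does not state explicitly):
*
* @example
* const due = await storage.getScheduledJobs('emails', Date.now());
* for (const raw of due) {
*   const job = JSON.parse(raw);
*   await storage.enqueueJob(job);
*   await storage.removeScheduledJob('emails', job.id);
* }
*/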
async getScheduledJobs(queue, maxScore) {
const scheduledKey = `queue:${queue}:scheduled`;
try {
const jobIds = await this.client.zrangebyscore(scheduledKey, 0, maxScore.toString());
const jobs = [];
for (const jobId of jobIds) {
const jobData = await this.client.get(`job:${jobId}:data`);
if (jobData)
jobs.push(jobData);
}
return jobs;
}
catch (err) {
throw new Error(`Failed to get scheduled jobs for ${queue}: ${err instanceof Error ? err.message : String(err)}`);
}
}
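/**
* Removes a job from the queue's scheduled sorted set and deletes its stored payload.
*/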
async removeScheduledJob(queue, jobId) {
const scheduledKey = `queue:${queue}:scheduled`;
try {
await this.client.zrem(scheduledKey, jobId);
await this.client.del(`job:${jobId}:data`);
}
catch (err) {
throw new Error(`Failed to remove scheduled job ${jobId}: ${err instanceof Error ? err.message : String(err)}`);
}
}
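/**
* Closes the Redis connection gracefully; failures are logged rather than thrown.
*/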
async disconnect() {
try {
await this.client.quit();
}
catch (err) {
console.error('Failed to disconnect Redis:', err);
}
}
}
exports.RedisStorage = RedisStorage;