
@penkov/tasks_queue

A lightweight PostgreSQL-backed task queue system with scheduling, retries, backoff strategies, and priority handling. Designed for efficiency and observability in modern Node.js applications.
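The module listed below is the pools service: it fans tasks out across named pools, each of which wraps its own TasksQueueService with a fixed concurrency and polling loop interval. Pool names must be unique; the constructor throws on duplicates. As a sketch, a pool configuration might look like this (the "default" entry mirrors the constructor's built-in fallback; the second pool and its numbers are purely illustrative):

const pools = [
    // Mirrors the constructor's fallback pool.
    { name: "default", concurrency: 1, loopInterval: 60000 },
    // Hypothetical extra pool for heavier jobs; name and numbers are illustrative.
    { name: "heavy", concurrency: 4, loopInterval: 15000 },
];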

import { Collection, mutable, none, some } from "scats";
import { TasksQueueService } from "./tasks-queue.service.js";
import { TasksAuxiliaryWorker } from "./tasks-auxiliary-worker.js";
import log4js from "log4js";
import { TaskPeriodType } from "./tasks-model.js";

export const DEFAULT_POOL = "default";

const logger = log4js.getLogger("TasksPoolsService");

export class TasksPoolsService {
    dao;
    /** Pool name -> TasksQueueService running that pool. */
    pools;
    /** Queue name -> name of the pool its worker is registered in. */
    queuesPool = new mutable.HashMap();
    auxiliaryWorker;

    constructor(dao, manageTasksQueueService, runAuxiliaryWorker, pools = [
        {
            name: DEFAULT_POOL,
            concurrency: 1,
            loopInterval: 60000,
        },
    ]) {
        this.dao = dao;
        const poolsCollection = Collection.from(pools);
        const poolNames = poolsCollection.map((p) => p.name).toSet;
        if (poolsCollection.size !== poolNames.size) {
            throw new Error("Duplicate pool names detected");
        }
        // Each pool gets its own queue service; the auxiliary worker is managed
        // here, so the per-pool services never start their own.
        this.pools = poolsCollection.toMap((p) => [
            p.name,
            new TasksQueueService(dao, manageTasksQueueService, {
                concurrency: p.concurrency,
                runAuxiliaryWorker: false,
                loopInterval: p.loopInterval,
            }),
        ]);
        this.auxiliaryWorker = runAuxiliaryWorker
            ? some(new TasksAuxiliaryWorker(dao, manageTasksQueueService))
            : none;
    }

    start() {
        logger.info(`Starting TasksPoolsService with ${this.pools.size} pools`);
        this.auxiliaryWorker.foreach((w) => w.start());
        this.pools.values.foreach((p) => p.start());
    }

    /** Stops the auxiliary worker and all pools, rejecting after `timeoutMs`. */
    async stop(timeoutMs = 30000) {
        logger.info("Stopping TasksPoolsService");
        try {
            await Promise.race([
                Promise.all([
                    this.auxiliaryWorker.mapPromise((w) => w.stop()),
                    this.pools.values.mapPromise((p) => p.stop()),
                ]),
                new Promise((_, reject) => setTimeout(() => reject(new Error("Stop timeout")), timeoutMs)),
            ]);
            logger.info("TasksPoolsService stopped successfully");
        }
        catch (e) {
            logger.error("Failed to stop TasksPoolsService gracefully", e);
            throw e;
        }
    }

    /** Registers a worker for `queueName` in `poolName`; a queue may belong to only one pool. */
    registerWorker(queueName, worker, poolName = DEFAULT_POOL) {
        if (this.queuesPool.containsKey(queueName)) {
            throw new Error(`Queue '${queueName}' is already registered in pool '${this.queuesPool.get(queueName).getOrElseValue("unknown")}'`);
        }
        this.pools.get(poolName).match({
            some: (pool) => {
                pool.registerWorker(queueName, worker);
                this.queuesPool.put(queueName, poolName);
                logger.info(`Registered worker for queue '${queueName}' in pool '${poolName}'`);
            },
            none: () => {
                throw new Error(`Pool '${poolName}' not registered`);
            },
        });
    }

    /** Schedules a one-off task. */
    async schedule(task) {
        const taskId = await this.dao.schedule(task);
        this.taskScheduled(task.queue, taskId);
    }

    /** Schedules a periodic task executed at a fixed rate. */
    async scheduleAtFixedRate(task) {
        const taskId = await this.dao.schedulePeriodic(task, TaskPeriodType.fixed_rate);
        this.taskScheduled(task.queue, taskId);
    }

    /** Schedules a periodic task executed with a fixed delay between runs. */
    async scheduleAtFixedDelay(task) {
        const taskId = await this.dao.schedulePeriodic(task, TaskPeriodType.fixed_delay);
        this.taskScheduled(task.queue, taskId);
    }

    /** Notifies the pool that owns `queue` that a new task has been persisted. */
    taskScheduled(queue, taskId) {
        this.queuesPool.get(queue).match({
            some: (poolName) => {
                this.pools.get(poolName).foreach((pool) => {
                    pool.taskScheduled(queue);
                });
            },
            none: () => {
                logger.info(`No worker registered for queue '${queue}'. ` +
                    `Task (id=${taskId.getOrElseValue(-1)}) will remain in pending state`);
            },
        });
    }
}
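
A minimal lifecycle sketch, assuming a PostgreSQL-backed dao, a manageTasksQueueService, and a worker implementation constructed elsewhere in the package (none of that setup is visible in this file); the import path, queue names, and task payload fields are illustrative rather than documented API:

// Assumed context: the import path, dao, manageTasksQueueService and emailWorker
// are illustrative stand-ins constructed elsewhere in the application.
import { TasksPoolsService, DEFAULT_POOL } from "./tasks-pools.service.js";

const service = new TasksPoolsService(dao, manageTasksQueueService, true, [
    { name: DEFAULT_POOL, concurrency: 1, loopInterval: 60000 },
    { name: "emails", concurrency: 4, loopInterval: 10000 },
]);

// Each queue belongs to exactly one pool; registering the same queue twice throws.
service.registerWorker("email-send", emailWorker, "emails");
service.start();

// One-off and periodic scheduling. The service itself only reads task.queue;
// any other fields (payload, period, etc.) are whatever the DAO and worker expect.
await service.schedule({ queue: "email-send", payload: { to: "user@example.com" } });
await service.scheduleAtFixedDelay({ queue: "email-send", payload: { digest: true } });

// Graceful shutdown: stop() rejects if the pools do not finish within 10 s.
await service.stop(10000);

Tasks scheduled for a queue with no registered worker are still persisted by the DAO; as the taskScheduled fallback logs, they simply remain pending.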