// UNPKG — faktory-worker
// (page metadata from unpkg.com, commented out so the file parses)
// Version: unknown
// 344 lines (343 loc) 12.5 kB
"use strict";
// Interop helper emitted by the TypeScript compiler: wraps a plain CommonJS
// export so `.default` access works for default-style imports.
var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; };
Object.defineProperty(exports, "__esModule", { value: true });
exports.Worker = exports.SHUTDOWN_TIMEOUT_EXCEEDED_MSG = exports.CLEANUP_DELAY_MS = void 0;
const debug_1 = __importDefault(require("debug"));
const uuid_1 = require("uuid");
const assert_1 = require("assert");
const events_1 = require("events");
const promises_1 = require("timers/promises");
const client_1 = require("./client");
const utils_1 = require("./utils");
const utils_2 = require("./utils");
const create_execution_chain_1 = __importDefault(require("./create-execution-chain"));
const queues_1 = require("./queues");
// Namespaced debug logger for this module (enable with DEBUG=faktory-worker:worker).
const debug = (0, debug_1.default)("faktory-worker:worker");
// Private symbol used as a method key so the FAIL routine below is not part of
// the class's public (string-keyed) API.
const fail = Symbol("fail");
// Grace period after jobs are aborted, giving them a moment to run cleanup.
// Shortened under NODE_ENV=test so the test suite stays fast.
exports.CLEANUP_DELAY_MS = process.env.NODE_ENV === "test" ? 100 : 3000;
// Reason attached to the abort signal when a graceful shutdown exceeds its timeout.
exports.SHUTDOWN_TIMEOUT_EXCEEDED_MSG = "faktory worker shutdown timeout exceeded";
/**
 * Representation of a worker process with many concurrent job processors. Works at the
 * concurrency set in options during construction. Will hold at most `concurrency` jobs
 * in-memory while processing at any one time. Listens for signals to quiet or shutdown.
 * Should not be started more than once per-process, nor should more than one worker be
 * started per-process.
 *
 * @example
 * const worker = new Worker({
 *   queues: ['critical', 'default', 'low'],
 * });
 *
 * worker.work();
 */
class Worker extends events_1.EventEmitter {
    /**
     * @param {object} [options]
     * @param {String} [options.wid=uuid().slice(0, 8)]: the wid the worker will use
     * @param {Number} [options.concurrency=20]: how many jobs this worker can process at once
     * @param {Number} [options.timeout=8]: the amount of time in seconds that the worker
     * may take to finish a job before exiting ungracefully
     * @param {Number} [options.beatInterval=15]: the amount of time in seconds between each
     * heartbeat
     * @param {string[]} [options.queues=['default']]: the queues this worker will fetch jobs from
     * @param {function[]} [options.middleware=[]]: a set of middleware to run before performing
     * each job
     * in koa.js-style middleware execution signature
     * @param {Registry} [options.registry=Registry]: the job registry to use when working
     * @param {Number} [options.poolSize=concurrency+2] the client connection pool size for
     * this worker
     */
    constructor(options = {}) {
        super();
        this.wid = options.wid || (0, uuid_1.v4)().slice(0, 8);
        this.concurrency = options.concurrency || 20;
        // options are given in seconds; convert to milliseconds once here
        this.shutdownTimeout = (options.timeout || 8) * 1000;
        this.beatInterval = (options.beatInterval || 15) * 1000;
        const queues = options.queues || [];
        // `queues` may be: a function producing a queue list per fetch, an ordered
        // array, or a weight map (object) — normalize all three into `queueFn`
        if (typeof queues === "function") {
            this.queueFn = queues;
        }
        else if (Array.isArray(queues)) {
            this.queueFn = (0, queues_1.strictlyOrdered)(queues.length ? queues : ["default"]);
        }
        else {
            this.queueFn = (0, queues_1.weightedRandom)(queues);
        }
        this.middleware = options.middleware || [];
        this.registry = options.registry || {};
        // in-progress jobs: job payload -> the promise returned by handle(job)
        this.working = new Map();
        this.client = new client_1.Client({
            wid: this.wid,
            url: options.url,
            host: options.host,
            port: options.port,
            password: options.password,
            // +2 leaves headroom for heartbeats/fetches beyond the job connections
            poolSize: options.poolSize || this.concurrency + 2,
            labels: options.labels || [],
        });
        // default "error" listener prevents EventEmitter from throwing on emit
        this.on("error", this.onerror);
    }
    /**
     * One iteration of the fetch/execute loop: waits for capacity, fetches at
     * most one job, then reschedules itself. Stops silently once quieted or aborted.
     *
     * @private
     */
    async tick() {
        if (this.quieted)
            return;
        if (this.abortCtl.signal.aborted)
            return;
        try {
            if (this.working.size >= this.concurrency) {
                // at capacity: wait for any in-flight job to settle before fetching more
                await Promise.race(this.working.values());
            }
            else {
                const job = await this.fetch();
                if (job) {
                    // track the handling promise; handle() removes itself when done
                    this.working.set(job, this.handle(job));
                }
            }
        }
        catch (e) {
            this.emit("error", e);
            // brief backoff so a failing server isn't hammered in a tight loop
            await (0, utils_2.sleep)(1000);
        }
        finally {
            // schedule the next iteration; intentionally not awaited
            this.tick();
        }
    }
    /**
     * starts the worker fetch loop and job processing
     *
     * @return self, when working has been stopped by a signal or concurrent
     * call to stop or quiet
     * @see Worker.quiet
     * @see Worker.stop
     */
    async work() {
        debug("work concurrency=%i", this.concurrency);
        this.quieted = false;
        this.abortCtl = new AbortController();
        this.execute = (0, create_execution_chain_1.default)(this.middleware, this.registry);
        // first heartbeat is awaited so registration errors surface to the caller
        await this.beat();
        this.pulse = setInterval(async () => {
            try {
                await this.beat();
            }
            catch (error) {
                this.emit("error", new Error(`Worker failed heartbeat: ${error.message}\n${error.stack}`));
            }
        }, this.beatInterval);
        // keep the deregistration closure so stop() can remove the handlers
        this.untrapSignals = this.trapSignals();
        this.tick();
        return this;
    }
    /**
     * Signals to the worker to discontinue fetching new jobs and allows the worker
     * to continue processing any currently-running jobs
     */
    quiet() {
        debug("quiet");
        this.quieted = true;
    }
    /**
     * stops the worker
     *
     * @return {promise} resolved when worker stops
     */
    async stop() {
        var _a, _b;
        debug("stop");
        this.quiet();
        debug("deregistering signal handlers");
        (_a = this.untrapSignals) === null || _a === void 0 ? void 0 : _a.call(this);
        // @TODO if SIGINTed a second time, skip ahead to abort
        // controls the shutdown timer itself, so a clean finish can cancel it
        const abortTimeoutCtl = new AbortController();
        // path 1: shutdown timeout elapses — abort in-flight jobs and FAIL them
        const abortAfterTimeout = async () => {
            var _a;
            await (0, promises_1.setTimeout)(this.shutdownTimeout, undefined, {
                signal: abortTimeoutCtl.signal,
            });
            debug(exports.SHUTDOWN_TIMEOUT_EXCEEDED_MSG);
            (_a = this.abortCtl) === null || _a === void 0 ? void 0 : _a.abort(new Error(exports.SHUTDOWN_TIMEOUT_EXCEEDED_MSG));
            try {
                // FAIL in-progress jobs as they have been aborted
                await Promise.all([...this.working.keys()].map((job) => {
                    var _a;
                    return this[fail](job, (_a = this.abortCtl) === null || _a === void 0 ? void 0 : _a.signal.reason);
                }));
            }
            catch (e) {
                // jobs aren't necessarily lost here, as they will be requeued by the server
                // after their reservation timeout
                this.emit("error", e);
            }
            // An abort signal was sent, but jobs may need a little time to do cleanup.
            await (0, promises_1.setTimeout)(exports.CLEANUP_DELAY_MS, undefined, {
                signal: abortTimeoutCtl.signal,
            });
        };
        // path 2: every in-flight job settles before the timeout
        const allJobsComplete = async () => {
            var _a;
            debug(`awaiting ${this.working.size} job${this.working.size > 1 ? "s" : ""} in progress`);
            await Promise.all(this.working.values());
            // jobs were aborted and have a little time to cleanup
            if ((_a = this.abortCtl) === null || _a === void 0 ? void 0 : _a.signal.aborted)
                return;
            // jobs finished before an abort
            debug("all clear");
            // and we can cancel the imminent abort/hard shutdown
            abortTimeoutCtl.abort();
        };
        try {
            await Promise.race([allJobsComplete(), abortAfterTimeout()]);
        }
        catch (e) {
            // ABORT_ERR here means the shutdown timer was cancelled — expected on a
            // clean finish; anything else is a real failure and is rethrown
            if (e.code !== "ABORT_ERR") {
                throw e;
            }
            else {
                this.emit("error", e);
            }
        }
        finally {
            clearInterval(this.pulse);
            await this.client.close();
            // a hard (aborted) shutdown exits non-zero so supervisors can tell
            if ((_b = this.abortCtl) === null || _b === void 0 ? void 0 : _b.signal.aborted) {
                process.exit(1);
            }
        }
    }
    /**
     * Reports a job as failed to the server and emits a "fail" event.
     * Keyed by a private symbol — not part of the public API.
     *
     * @private
     */
    async [fail](job, error) {
        const { jid } = job;
        await this.client.fail(jid, error);
        debug(`FAIL ${jid}`);
        this.emit("fail", { job, error });
    }
    /**
     * Sends a heartbeat for this server and interprets the response state (if present)
     * to quiet or terminate the worker
     */
    async beat() {
        const response = await this.client.beat();
        switch (response) {
            case "quiet":
                this.quiet();
                break;
            case "terminate":
                // NOTE(review): stop() promise intentionally not awaited here — verify
                // callers don't rely on beat() resolving only after shutdown completes
                this.stop();
                break;
            default:
                break;
        }
    }
    // the queue list for the next fetch, produced by the configured queue function
    get queues() {
        return this.queueFn();
    }
    /**
     * Fetches a job from the defined queues.
     *
     * @private
     * @return {JobPayload|null} a job payload from the server or null when there are
     * no jobs
     */
    fetch() {
        return this.client.fetch(...this.queues);
    }
    /**
     * Handles a job from the server by executing it and either acknowledging
     * or failing the job when done
     *
     * @private
     * @param {JobPayload} job the job payload from the server
     * @return {Promise<string>} 'ack' or 'fail' depending on job handling result
     */
    async handle(job) {
        var _a;
        const { jid } = job;
        let error;
        try {
            debug(`executing ${jid}`);
            await this.execute({ job, signal: this.abortCtl.signal });
        }
        catch (e) {
            // normalize thrown non-Error values so the server gets a usable failure
            error = (0, utils_1.wrapNonErrors)(e);
        }
        try {
            if ((_a = this.abortCtl) === null || _a === void 0 ? void 0 : _a.signal.aborted) {
                // job will be FAILed in the shutdown task
                return "abort";
            }
            else if (!error) {
                await this.client.ack(jid);
                debug(`ACK ${jid}`);
                return "done";
            }
            else {
                await this[fail](job, error);
                return "fail";
            }
        }
        catch (e) {
            this.emit("error", e);
            return "error";
        }
        finally {
            // always release the concurrency slot, whatever the outcome
            this.working.delete(job);
        }
    }
    /**
     * Adds a middleware function to the stack
     *
     * @param {Function} fn koa-compose-style middleware function
     * @return {FaktoryControl} this
     * @instance
     * @see {@link https://github.com/koajs/koa/blob/master/docs/guide.md#writing-middleware|koa middleware}
     * @example
     * faktory.use(async (ctx, next) => {
     *   // a pool you created to hold database connections
     *   pool.use(async (conn) => {
     *     ctx.db = conn;
     *     await next();
     *   });
     * });
     */
    use(fn) {
        (0, assert_1.strict)(typeof fn === "function");
        debug("use %s", fn.name || "-");
        this.middleware.push(fn);
        return this;
    }
    // default "error" listener: only logs when no user listener was attached
    // (listenerCount === 1 means this handler is the sole listener)
    onerror(error) {
        if (this.listenerCount("error") === 1)
            console.error(error);
    }
    /**
     * Adds a {@link JobFunction|JobFunction} to the {@link Registry}
     *
     * @param {Jobtype} name string descriptor for the jobtype
     * @param {JobFunction} fn
     * @return {FaktoryControl} this
     * @instance
     * @example
     * faktory.register('MyJob', (...args) => {
     *   // some work
     * });
     */
    register(name, fn) {
        (0, assert_1.strict)(typeof fn === "function", "a registered job must be a function");
        debug("registered %s", name);
        this.registry[name] = fn;
        return this;
    }
    /**
     * Installs once-only signal handlers (SIGTERM/SIGINT stop, SIGTSTP quiets)
     * and returns a closure that removes them again.
     *
     * @private
     */
    trapSignals() {
        // istanbul ignore next
        const stop = () => this.stop();
        const quiet = () => this.quiet();
        process.once("SIGTERM", stop).once("SIGTSTP", quiet).once("SIGINT", stop);
        return () => {
            process
                .removeListener("SIGTERM", stop)
                .removeListener("SIGTSTP", quiet)
                .removeListener("SIGINT", stop);
        };
    }
}
exports.Worker = Worker;