// actionhero — the reusable, scalable, and quick Node.js API server for
// stateless and stateful applications.
// Compiled JavaScript output of the `task` module (task.ts).
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.task = void 0;
const index_1 = require("./../index");
var task;
(function (task_1) {
/**
 * Add a task to a queue so a background worker can pick it up.
 * Inputs are validated (defaults, validators, required) before enqueueing.
 * Rejects if redis cannot be reached.
 */
async function enqueue(taskName, inputs, queue = index_1.api.tasks.tasks[taskName].queue) {
    await validateInput(taskName, inputs);
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.enqueue(queue, taskName, [inputs]);
}
task_1.enqueue = enqueue;
/**
 * Schedule a task to run in the background at a specific future time.
 * Rejects if redis cannot be reached.
 *
 * Inputs:
 * * timestamp: when the task becomes runnable.
 * * taskName: the name of the task.
 * * inputs: inputs to pass to the task.
 * * queue: (optional) which queue/priority to run this instance of the task on.
 * * suppressDuplicateTaskError: (optional) suppress errors when the same task with the same arguments is double-enqueued for the same time.
 */
async function enqueueAt(timestamp, taskName, inputs, queue = index_1.api.tasks.tasks[taskName].queue, suppressDuplicateTaskError = false) {
    await validateInput(taskName, inputs);
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.enqueueAt(timestamp, queue, taskName, [inputs], suppressDuplicateTaskError);
}
task_1.enqueueAt = enqueueAt;
/**
 * Schedule a task to run in the background a number of milliseconds from now.
 * Rejects if redis cannot be reached.
 *
 * Inputs:
 * * time: delay (in ms) before the task is able to be run; does not guarantee the task runs exactly then.
 * * taskName: the name of the task.
 * * inputs: inputs to pass to the task.
 * * queue: (optional) which queue/priority to run this instance of the task on.
 * * suppressDuplicateTaskError: (optional) suppress errors when the same task with the same arguments is double-enqueued for the same time.
 */
async function enqueueIn(time, taskName, inputs, queue = index_1.api.tasks.tasks[taskName].queue, suppressDuplicateTaskError = false) {
    await validateInput(taskName, inputs);
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.enqueueIn(time, queue, taskName, [inputs], suppressDuplicateTaskError);
}
task_1.enqueueIn = enqueueIn;
/**
 * Delete a previously enqueued job, which hasn't been run yet, from a queue.
 * Rejects if redis cannot be reached.
 *
 * Inputs:
 * * q: which queue/priority the job is stored on.
 * * taskName: the name of the job, likely the same name as a task.
 * * args: the arguments of the job. Note: arguments passed to a task initially may be modified when enqueuing; it is best to read job properties first via `api.tasks.queued` or a similar method.
 * * count: of the jobs matching q, taskName, and args, up to what position to delete (default 0; 0-indexed).
 */
async function del(q, taskName, args, count) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.del(q, taskName, [args], count);
}
task_1.del = del;
/**
 * Delete all jobs in the given queue belonging to the named function/class.
 * * does not prevent new jobs from being added while it runs
 * * does not touch jobs in the delayed queues
 *
 * Inputs:
 * * q: which queue/priority to operate on.
 * * taskName: the name of the job, likely the same name as a task.
 * * start?: starting position of task count to remove.
 * * stop?: stop position of task count to remove.
 */
async function delByFunction(q, taskName, start, stop) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.delByFunction(q, taskName, start, stop);
}
task_1.delByFunction = delByFunction;
/**
 * Delete all matching not-yet-run jobs from every possible delayed timestamp.
 * Rejects if redis cannot be reached.
 *
 * Inputs:
 * * q: which queue/priority to operate on.
 * * taskName: the name of the job, likely the same name as a task.
 * * inputs: the arguments of the job. Note: arguments passed to a task initially may be modified when enqueuing; it is best to read job properties first via `api.tasks.delayedAt` or a similar method.
 */
async function delDelayed(q, taskName, inputs) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.delDelayed(q, taskName, [inputs]);
}
task_1.delDelayed = delDelayed;
/**
 * Return the timestamps a matching job is scheduled for.
 * Rejects if redis cannot be reached.
 *
 * Inputs:
 * * q: which queue/priority to look in.
 * * taskName: the name of the job, likely the same name as a task.
 * * inputs: the arguments of the job. Note: arguments passed to a task initially may be modified when enqueuing; it is best to read job properties first via `api.tasks.delayedAt` or a similar method.
 */
async function scheduledAt(q, taskName, inputs) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.scheduledAt(q, taskName, [inputs]);
}
task_1.scheduledAt = scheduledAt;
/**
 * Return all resque stats for this namespace (jobs failed, jobs succeeded, etc.).
 * Rejects if redis cannot be reached.
 */
async function stats() {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.stats();
}
task_1.stats = stats;
/**
 * Retrieve the details of jobs enqueued on a queue between start and stop (0-indexed).
 * Rejects if redis cannot be reached.
 *
 * Inputs:
 * * q: the name of the queue.
 * * start: index of the first job to return.
 * * stop: index of the last job to return.
 */
async function queued(q, start, stop) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.queued(q, start, stop);
}
task_1.queued = queued;
/**
 * Delete a queue in redis along with all jobs stored on it.
 * Rejects if redis cannot be reached.
 */
async function delQueue(q) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.delQueue(q);
}
task_1.delQueue = delQueue;
/**
 * Return any locks in this redis namespace, as created by resque plugins or
 * task middleware — keys like `resque:lock:{job}` and `resque:workerslock:{workerId}`.
 * Rejects if redis cannot be reached.
 */
async function locks() {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.locks();
}
task_1.locks = locks;
/**
 * Delete a lock on a job or worker; locks can be discovered via `api.tasks.locks`.
 * Rejects if redis cannot be reached.
 */
async function delLock(lock) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.delLock(lock);
}
task_1.delLock = delLock;
/**
 * List every future timestamp that has tasks enqueued for it
 * (via `api.tasks.enqueueIn` or `api.tasks.enqueueAt`).
 * Rejects if redis cannot be reached.
 */
async function timestamps() {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.timestamps();
}
task_1.timestamps = timestamps;
/**
 * Return all jobs enqueued to run at a given timestamp.
 * Rejects if redis cannot be reached.
 */
async function delayedAt(timestamp) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.delayedAt(timestamp);
}
task_1.delayedAt = delayedAt;
/**
 * Return every delayed job, organized by the timestamp it is scheduled to run at.
 * Note: this is a very slow command.
 * Rejects if redis cannot be reached.
 */
async function allDelayed() {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.allDelayed();
}
task_1.allDelayed = allDelayed;
/**
 * Return all workers registered by all members of this cluster.
 * Note: MultiWorker processors each register as a unique worker.
 * Rejects if redis cannot be reached.
 */
async function workers() {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.workers();
}
task_1.workers = workers;
/**
 * Report what a given worker is working on; an idle worker reports 'started'.
 * Rejects if redis cannot be reached.
 */
async function workingOn(workerName, queues) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.workingOn(workerName, queues);
}
task_1.workingOn = workingOn;
/**
 * Return every worker together with the job it may be working on.
 * Rejects if redis cannot be reached.
 */
async function allWorkingOn() {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.allWorkingOn();
}
task_1.allWorkingOn = allWorkingOn;
/**
 * Count the jobs in the failed queue.
 * Rejects if redis cannot be reached.
 */
async function failedCount() {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.failedCount();
}
task_1.failedCount = failedCount;
/**
 * Retrieve the details of failed jobs between start and stop (0-indexed).
 * Rejects if redis cannot be reached.
 */
async function failed(start, stop) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.failed(start, stop);
}
task_1.failed = failed;
/**
 * Remove a specific job from the failed queue.
 * Rejects if redis cannot be reached.
 */
async function removeFailed(failedJob) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.removeFailed(failedJob);
}
task_1.removeFailed = removeFailed;
/**
 * Remove a specific job from the failed queue and retry it by placing it
 * back onto its original queue.
 * Rejects if redis cannot be reached.
 */
async function retryAndRemoveFailed(failedJob) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.retryAndRemoveFailed(failedJob);
}
task_1.retryAndRemoveFailed = retryAndRemoveFailed;
/**
 * Purge redis state left behind by crashed workers stuck in the "working" state.
 * Workers older than `age` are considered dead: their data is removed and their
 * payload is moved to the error queue. This does NOT stop any process that may
 * still be running — a job you remove here *may* still be executing.
 * Rejects if redis cannot be reached.
 */
async function cleanOldWorkers(age) {
    const resqueQueue = index_1.api.resque.queue;
    return resqueQueue.cleanOldWorkers(age);
}
task_1.cleanOldWorkers = cleanOldWorkers;
/**
 * Ensure a task with a frequency is either running or already enqueued.
 * Run automatically at boot for all recurring tasks via `api.tasks.enqueueAllRecurrentTasks`.
 * Rejects if redis cannot be reached.
 */
async function enqueueRecurrentTask(taskName) {
    const definition = index_1.api.tasks.tasks[taskName];
    // only tasks with a positive frequency are recurring
    if (!(definition.frequency > 0)) {
        return;
    }
    // clear any copies already waiting before scheduling the next run
    await task.del(definition.queue, taskName);
    await task.delDelayed(definition.queue, taskName);
    await task.enqueueIn(definition.frequency, taskName, {}, undefined, true);
    (0, index_1.log)(`re-enqueued recurrent job ${taskName}`, index_1.config.tasks.schedulerLogging.reEnqueue);
}
task_1.enqueueRecurrentTask = enqueueRecurrentTask;
/**
 * Enqueue every task that has a frequency; run automatically at boot.
 * Returns the names of the tasks that were actually enqueued.
 * Rejects if redis cannot be reached.
 */
async function enqueueAllRecurrentTasks() {
    const scheduled = [];
    for (const thisTask of Object.values(index_1.api.tasks.tasks)) {
        if (!(thisTask.frequency > 0)) {
            continue;
        }
        try {
            const toRun = await task.enqueue(thisTask.name, {});
            // `toRun` is strictly `true` only when the job was newly added
            if (toRun === true) {
                (0, index_1.log)(`enqueuing periodic task: ${thisTask.name}`, index_1.config.tasks.schedulerLogging.enqueue);
                scheduled.push(thisTask.name);
            }
        }
        catch (error) {
            // a duplicate-enqueue error from another process is tolerated
            checkForRepeatRecurringTaskEnqueue(thisTask.name, error);
        }
    }
    return scheduled;
}
task_1.enqueueAllRecurrentTasks = enqueueAllRecurrentTasks;
/**
 * Stop a recurring task by removing it from all possible queues.
 * Returns the number of removed entries, or undefined if the task has no frequency.
 * Rejects if redis cannot be reached.
 */
async function stopRecurrentTask(taskName) {
    const definition = index_1.api.tasks.tasks[taskName];
    if (!(definition.frequency > 0)) {
        return;
    }
    // remove at most one copy from the normal queue...
    const deletedFromQueue = await task.del(definition.queue, definition.name, null, 1);
    // ...and every copy from the delayed queues
    const deletedTimestamps = await task.delDelayed(definition.queue, definition.name, null);
    return deletedFromQueue + deletedTimestamps.length;
}
task_1.stopRecurrentTask = stopRecurrentTask;
/**
 * Return holistic details about the task system: failures, queues, workers,
 * per-queue lengths, and the current scheduler leader.
 * Rejects if redis cannot be reached.
 */
async function details() {
    const summary = { queues: {}, workers: {}, stats: null, leader: null };
    summary.workers = await task.allWorkingOn();
    summary.stats = await task.stats();
    const queueNames = await index_1.api.resque.queue.queues();
    for (const queueName of queueNames) {
        const length = await index_1.api.resque.queue.length(queueName);
        summary.queues[queueName] = { length };
    }
    summary.leader = await index_1.api.resque.queue.leader();
    return summary;
}
task_1.details = details;
/**
 * Register a task middleware by name. Middleware with `global: true` is added
 * to the global middleware list (kept sorted by priority), and all tasks are
 * reloaded so the new middleware takes effect.
 * Throws if `middleware.name` is missing.
 */
async function addMiddleware(middleware) {
    if (!middleware.name) {
        throw new Error("middleware.name is required");
    }
    // note: any falsy priority (including 0) falls back to the configured default
    const rawPriority = middleware.priority
        ? middleware.priority
        : index_1.config.general.defaultMiddlewarePriority;
    middleware.priority = Number(rawPriority);
    index_1.api.tasks.middleware[middleware.name] = middleware;
    if (middleware.global === true) {
        index_1.api.tasks.globalMiddleware.push(middleware.name);
        index_1.utils.sortGlobalMiddleware(index_1.api.tasks.globalMiddleware, index_1.api.tasks.middleware);
    }
    await index_1.api.tasks.loadTasks(true);
}
task_1.addMiddleware = addMiddleware;
/**
 * Apply defaults and run validators for a task's declared inputs, mutating
 * `inputs` in place. Throws if the task is unknown, a validator rejects a
 * value, or a required input is missing.
 * NOTE(review): called as a bare function (see enqueue/enqueueAt/enqueueIn),
 * so `this` is undefined here; the string-validator branch below that calls
 * `this.prepareStringMethod` would therefore throw — confirm whether string
 * validators are still supported.
 */
async function validateInput(taskName, inputs) {
    const task = index_1.api.tasks.tasks[taskName];
    if (!task) {
        throw new Error(`task ${taskName} not found`);
    }
    for (const key in task.inputs) {
        // default: fill in a missing input from the declared default, which may
        // be a literal or a function evaluated with `api` as `this`
        if (inputs[key] === undefined && task.inputs[key].default !== undefined) {
            if (typeof task.inputs[key].default === "function") {
                inputs[key] = await task.inputs[key].default.call(index_1.api, inputs[key], this);
            }
            else {
                inputs[key] = task.inputs[key].default;
            }
        }
        // validator: only run when a value is present
        if (inputs[key] !== undefined &&
            task.inputs[key].validator !== undefined) {
            let validatorResponse;
            if (typeof task.inputs[key].validator === "function") {
                // allowed to throw too
                const method = task.inputs[key].validator;
                validatorResponse = await method.call(index_1.api, inputs[key], this);
            }
            else {
                // NOTE(review): `this` is undefined in this call path — see doc above
                const method = this.prepareStringMethod(task.inputs[key].validator);
                validatorResponse = await method.call(index_1.api, inputs[key], this);
            }
            // validator function returned nothing; assume param is OK
            if (validatorResponse === null || validatorResponse === undefined) {
                // ok
            }
            // validator returned something that was not `true`
            else if (validatorResponse !== true) {
                if (validatorResponse === false) {
                    throw new Error(`${inputs[key]} is not a valid value for ${key} in task ${taskName}`);
                }
                else {
                    // any other truthy response is treated as the error message
                    throw new Error(validatorResponse);
                }
            }
        }
        // required: reject values matching the configured missing-param sentinels
        if (task.inputs[key].required === true) {
            if (index_1.config.general.missingParamChecks.indexOf(inputs[key]) >= 0) {
                throw new Error(`${key} is a required input for task ${taskName}`);
            }
        }
    }
}
/**
 * Decide whether an error raised while enqueueing a recurring task is benign.
 * A "duplicate enqueue" error means another process scheduled the same job
 * while this one was running — log a warning and continue. Anything else is
 * rethrown to the caller.
 *
 * Fix: the log message previously contained the literal text
 * "error.toString()" because the interpolation `${...}` was missing, so the
 * actual error detail was never logged.
 */
function checkForRepeatRecurringTaskEnqueue(taskName, error) {
    if (error.toString().match(/already enqueued at this time/)) {
        // this is OK, the job was enqueued by another process as this method was running
        (0, index_1.log)(`not enqueuing periodic task ${taskName} - ${error.toString()}`, "warning");
    }
    else {
        throw error;
    }
}
})(task || (exports.task = task = {}));