// @grouparoo/core — The Grouparoo Core
// Task / queue configuration (compiled JavaScript, 101 lines, 3.95 kB)
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.test = exports.DEFAULT = void 0;
const actionhero_1 = require("actionhero");
const namespace = "tasks";
exports.DEFAULT = {
[namespace]: () => {
return {
_toExpand: false,
// Should this node run a scheduler to promote delayed tasks?
scheduler: (process.env.WORKERS ? parseInt(process.env.WORKERS) : 0) > 0,
// what queues should the taskProcessors work?
queues: async () => {
var _a;
return [].concat(["system", "imports", "runs", "schedules", "groups", "exports"], (_a = actionhero_1.api === null || actionhero_1.api === void 0 ? void 0 : actionhero_1.api.plugins) === null || _a === void 0 ? void 0 : _a.plugins.filter((plugin) => { var _a; return ((_a = plugin.apps) === null || _a === void 0 ? void 0 : _a.length) > 0; }).flatMap((plugin) => plugin.apps.map((app) => `exports:${app.name}`)), [
"records",
"recordProperties",
"destinations",
"properties",
"sources",
"apps",
"models",
"default",
]);
},
workerLogging: {
failure: "error",
success: "info",
start: "info",
end: "info",
cleaning_worker: "info",
poll: "debug",
job: "debug",
pause: "debug",
reEnqueue: "debug",
internalError: "error",
multiWorkerAction: "debug",
},
// Logging levels of the task scheduler
schedulerLogging: {
start: "info",
end: "info",
poll: "debug",
enqueue: "debug",
working_timestamp: "debug",
reEnqueue: "debug",
transferred_job: "debug",
},
// how long to sleep between jobs / scheduler checks
timeout: 2500,
// at minimum, how many parallel taskProcessors should this node spawn?
// (have number > 0 to enable, and < 1 to disable)
minTaskProcessors: process.env.WORKERS
? parseInt(process.env.WORKERS)
: 0,
// at maximum, how many parallel taskProcessors should this node spawn?
maxTaskProcessors: process.env.WORKERS
? parseInt(process.env.WORKERS)
: 0,
// how often should we check the event loop to spawn more taskProcessors?
checkTimeout: 500,
// how many ms would constitute an event loop delay to halt taskProcessors spawning?
maxEventLoopDelay: 5,
// how long before we mark a resque worker / task processor as stuck/dead?
stuckWorkerTimeout: 1000 * 60 * 10,
// should the scheduler automatically try to retry failed tasks which were failed due to being 'stuck'?
retryStuckJobs: true,
// Customize Resque primitives, replace null with required replacement.
resque_overrides: {
queue: null,
multiWorker: null,
scheduler: null,
},
connectionOptions: {
tasks: {},
},
};
},
};
exports.test = {
[namespace]: () => {
return {
queues: [
"imports",
"records",
"exports",
"runs",
"groups",
"schedules",
"destinations",
"properties",
"sources",
"apps",
"default",
],
timeout: 100,
checkTimeout: 50,
};
},
};