/**
 * @sidequest/engine — the core engine of SideQuest, a distributed background
 * job processing system for Node.js and TypeScript.
 * Version: (unspecified)
 * 173 lines (169 loc) • 6.8 kB • JavaScript
 */
;
var core = require('@sidequest/core');
var EventEmitter = require('events');
var util = require('util');
var jobTransitioner = require('../job/job-transitioner.cjs');
require('node:fs');
require('node:url');
require('node:path');
var runnerPool = require('../shared-runner/runner-pool.cjs');
/**
* Manages job execution and worker concurrency for Sidequest.
*/
class ExecutorManager {
  backend;
  nonNullConfig;
  activeByQueue;
  activeJobs;
  runnerPool;
  /**
   * Creates a new ExecutorManager.
   * @param backend The backend instance.
   * @param nonNullConfig The non-nullable engine configuration.
   */
  constructor(backend, nonNullConfig) {
    this.backend = backend;
    this.nonNullConfig = nonNullConfig;
    // Per-queue sets of active job ids, keyed by queue name.
    this.activeByQueue = {};
    // Ids of every job currently executing, across all queues.
    this.activeJobs = new Set();
    this.runnerPool = new runnerPool.RunnerPool(this.nonNullConfig);
  }
  /**
   * Gets the number of available slots for a given queue.
   * @param queueConfig The queue configuration.
   * @returns The number of available slots (never negative).
   */
  availableSlotsByQueue(queueConfig) {
    if (!this.activeByQueue[queueConfig.name]) {
      this.activeByQueue[queueConfig.name] = new Set();
    }
    const activeJobs = this.activeByQueue[queueConfig.name];
    // Per-queue concurrency defaults to 10 when not configured.
    const limit = queueConfig.concurrency ?? 10;
    return Math.max(0, limit - activeJobs.size);
  }
  /**
   * Gets the number of available slots globally.
   * @returns The number of available slots (never negative).
   */
  availableSlotsGlobal() {
    const limit = this.nonNullConfig.maxConcurrentJobs;
    return Math.max(0, limit - this.activeJobs.size);
  }
  /**
   * Gets the total number of active workers.
   * @returns The number of active jobs.
   */
  totalActiveWorkers() {
    return this.activeJobs.size;
  }
  /**
   * Prepares a job for execution by marking it as active and adding it to a queue slot.
   * @param queueConfig The queue configuration.
   * @param job The job data.
   */
  queueJob(queueConfig, job) {
    if (!this.activeByQueue[queueConfig.name]) {
      this.activeByQueue[queueConfig.name] = new Set();
    }
    this.activeByQueue[queueConfig.name].add(job.id);
    this.activeJobs.add(job.id);
  }
  /**
   * Executes a job in the given queue.
   *
   * Transitions the job to running, runs it on the runner pool, and applies
   * the transition derived from the result. A background poller watches the
   * backend for external cancellation and emits an "abort" signal to the
   * runner; an optional per-job timeout aborts the run and schedules a retry.
   * @param queueConfig The queue configuration.
   * @param job The job data to execute.
   */
  async execute(queueConfig, job) {
    let isRunning = false;
    // Handle for the timeout timer so it can be cleared once the job settles.
    // (Previously the timer was never cleared, so it could fire after
    // completion, emit a spurious abort, and retry an already-finished job.)
    let timeoutHandle;
    try {
      core.logger("Executor Manager").debug(`Submitting job ${job.id} for execution in queue ${queueConfig.name}`);
      // We call queueJob here again to make sure the jobs are in the queues.
      // This might not be necessary, but for the sake of consistency we do it.
      this.queueJob(queueConfig, job);
      job = await jobTransitioner.JobTransitioner.apply(this.backend, job, new core.RunTransition());
      isRunning = true;
      const signal = new EventEmitter();
      // Poll the backend once a second; if the job was canceled externally,
      // tell the runner to abort. Polling errors are logged and the poll
      // retried instead of surfacing as unhandled rejections (this loop is
      // fire-and-forget below).
      const cancellationCheck = async () => {
        while (isRunning) {
          try {
            const watchedJob = await this.backend.getJob(job.id);
            if (watchedJob?.state === "canceled") {
              core.logger("Executor Manager").debug(`Emitting abort signal for job ${job.id}`);
              signal.emit("abort");
              isRunning = false;
              return;
            }
          }
          catch (pollError) {
            core.logger("Executor Manager").error(`Failed to poll cancellation state for job ${job.id}: ${pollError}`);
          }
          await new Promise((r) => setTimeout(r, 1000));
        }
      };
      void cancellationCheck();
      core.logger("Executor Manager").debug(`Running job ${job.id} in queue ${queueConfig.name}`);
      const runPromise = this.runnerPool.run(job, signal);
      if (job.timeout) {
        // Abort and schedule a retry if the job overruns its timeout. The
        // timer is cleared in `finally` so it cannot act on a settled job.
        timeoutHandle = setTimeout(() => {
          core.logger("Executor Manager").debug(`Job ${job.id} timed out after ${job.timeout}ms, aborting.`);
          signal.emit("abort");
          void jobTransitioner.JobTransitioner.apply(this.backend, job, new core.RetryTransition(`Job timed out after ${job.timeout}ms`));
        }, job.timeout);
      }
      const result = await runPromise;
      isRunning = false;
      core.logger("Executor Manager").debug(`Job ${job.id} completed with result: ${util.inspect(result)}`);
      const transition = core.JobTransitionFactory.create(result);
      await jobTransitioner.JobTransitioner.apply(this.backend, job, transition);
    }
    catch (error) {
      isRunning = false;
      const err = error;
      if (err.message === "The task has been aborted") {
        // Expected when the abort signal fired (cancellation or timeout).
        core.logger("Executor Manager").debug(`Job ${job.id} was aborted`);
      }
      else {
        core.logger("Executor Manager").error(`Unhandled error while executing job ${job.id}: ${err.message}`);
        await jobTransitioner.JobTransitioner.apply(this.backend, job, new core.RetryTransition(err));
      }
    }
    finally {
      isRunning = false;
      // Cancel any pending timeout so it cannot abort/retry a settled job.
      if (timeoutHandle !== undefined) {
        clearTimeout(timeoutHandle);
      }
      this.activeByQueue[queueConfig.name].delete(job.id);
      this.activeJobs.delete(job.id);
    }
  }
  /**
   * Destroys the runner pool and releases resources.
   *
   * Polls once a second until all active jobs have finished, then tears down
   * the runner pool; resolves on success, rejects if teardown throws.
   */
  async destroy() {
    await new Promise((resolve, reject) => {
      const checkJobs = () => {
        if (this.totalActiveWorkers() === 0) {
          core.logger("ExecutorManager").info("All active jobs finished. Destroying runner pool.");
          try {
            // NOTE(review): if runnerPool.destroy() is async, its rejection is
            // not observed here — confirm against RunnerPool's contract.
            this.runnerPool.destroy();
            core.logger("ExecutorManager").debug("Runner pool destroyed. Returning.");
            resolve();
          }
          catch (error) {
            core.logger("ExecutorManager").error("Error while destroying runner pool:", error);
            reject(error);
          }
        }
        else {
          core.logger("ExecutorManager").info(`Waiting for ${this.totalActiveWorkers()} active jobs to finish...`);
          setTimeout(checkJobs, 1000);
        }
      };
      void checkJobs();
    });
  }
}
// Public CommonJS export of the executor manager.
exports.ExecutorManager = ExecutorManager;
//# sourceMappingURL=executor-manager.cjs.map