/**
 * semantic-network
 * A utility library for manipulating a list of links that form a semantic
 * interface to a network of resources.
 * (compiled JavaScript output — 176 lines, 6.78 kB)
 */
import { __awaiter } from "tslib";
import Bottleneck from 'bottleneck';
import anylogger from 'anylogger';
// Module-scoped logger: all limiter and queue activity is reported under 'Loader'.
const log = anylogger('Loader');
/**
* Loading service to allow for rate limiting and prioritising concurrent requests and
* being able to cancel some or all requests.
*
* Wraps bottleneck and axios cancellable in the background using es6 promises.
*
*/
export class BottleneckLoader {
    /**
     * @param {object} [options] - Bottleneck constructor overrides, merged over
     *                             {@link BottleneckLoader.defaultOptions}
     */
    constructor(options = {}) {
        this._currentOptions = options;
        this._limiter = BottleneckLoader.limiterFactory(options);
        // In-flight requests keyed by caller-supplied id. Each entry holds the
        // originally scheduled promise plus any piggy-backed duplicate promises.
        this.requests = new Map();
        this._registerEvents();
    }
    /**
     * Default limiter configuration used when the caller supplies no overrides.
     */
    static get defaultOptions() {
        return {
            // num of jobs that can be running at the same time
            maxConcurrent: 5,
            // immediately launch the next job
            minTime: 0,
            // default: how long can the queue get? At this stage unlimited
            highWater: null,
            // this is actually the default
            strategy: Bottleneck.strategy.LEAK,
            // use es6 promise over the default Bluebird
            Promise: Promise,
        };
    }
    /**
     * Event names emitted by the underlying limiter.
     *
     * @see {@link Bottleneck.on}
     * @return {{EMPTY: string, IDLE: string, DROPPED: string, DEPLETED: string, DEBUG: string, ERROR: string}}
     */
    static get event() {
        return {
            EMPTY: 'empty',
            IDLE: 'idle',
            DROPPED: 'dropped',
            DEPLETED: 'depleted',
            DEBUG: 'debug',
            ERROR: 'error',
        };
    }
    /**
     * Access to the limiter. Chain the methods of this instance if you require it
     *
     * @example loader.limiter.on(loader.event.DEBUG, () => {});
     * @example itemsInQueue = loader.limiter.queued();
     * @example loader.limiter.schedule( ...
     */
    get limiter() {
        return this._limiter;
    }
    /**
     * Current options in the limiter
     */
    get currentOptions() {
        return this._currentOptions;
    }
    /**
     * Make a new limiter with the options merged over {@link defaultOptions}.
     */
    static limiterFactory(options) {
        log.debug('limiter factory created');
        return new Bottleneck(Object.assign(Object.assign({}, BottleneckLoader.defaultOptions), options));
    }
    /**
     * Attach the logging handlers to the current limiter.
     *
     * Extracted so that {@link clearAll} can re-attach handlers after it
     * replaces the limiter — previously the replacement limiter silently
     * lost its ERROR/DEBUG logging.
     */
    _registerEvents() {
        this._limiter.on(BottleneckLoader.event.ERROR, error => {
            log.error('[Limiter] Error: %s', error);
        });
        this._limiter.on(BottleneckLoader.event.DEBUG, message => {
            // this is quite noisy so limiting down to trace
            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
            // @ts-ignore
            if (log.level === 7) {
                log.debug('[Limiter] %s', message);
            }
        });
    }
    /**
     * This method wraps the limiter scheduler because it cannot deal with multiple requests at the same time on
     * the same 'id'. This queues up subsequent requests and then resolves them upon the original request.
     *
     * This is primarily used for GET requests.
     *
     * Note: this is a naive implementation of queue clearing.
     *
     * TODO: cancelled promises need to be cleared out of this queue too
     *
     * @see https://github.com/SGrondin/bottleneck/issues/68
     *
     * @param {string} id - de-duplication key; concurrent calls with the same id share one result
     * @param {Function} action - the work to schedule on the limiter
     * @param {{loaderJob?: object}} [options] - loaderJob is forwarded to Bottleneck as the job description
     * @return {Promise<*>} resolves/rejects with the (possibly shared) scheduled action's outcome
     */
    schedule(id, action, options) {
        return __awaiter(this, void 0, void 0, function* () {
            log.debug('request queue pending (%s total)', this.requests.size);
            const request = this.requests.get(id);
            if (!request) {
                // First request for this id: actually schedule it on the limiter.
                const p = new Promise((resolve, reject) => __awaiter(this, void 0, void 0, function* () {
                    try {
                        const { loaderJob } = Object.assign({}, options);
                        const result = yield this._limiter.schedule(Object.assign(Object.assign({}, loaderJob), { id }), action);
                        // Do this before request is resolved,
                        // so a request with the same id must now resolve to a new request
                        log.debug('request queue remove \'%s\'', id);
                        this.requests.delete(id);
                        // resolving with chain through to the pending requests
                        resolve(result);
                    }
                    catch (error) {
                        // Do this before request is resolved,
                        // so a request with the same id must now resolve to a new request
                        this.requests.delete(id);
                        reject(error);
                    }
                }));
                this.requests.set(id, { request: p, promises: [] });
                log.debug('request queue add \'%s\'', id);
                return p;
            }
            else {
                // Duplicate request: piggy-back on the original in-flight promise so the
                // limiter only ever runs the action once per id.
                const p = new Promise((resolve, reject) => __awaiter(this, void 0, void 0, function* () {
                    try {
                        const result = yield request.request;
                        resolve(result);
                    }
                    catch (e) {
                        reject(e);
                    }
                }));
                request.promises.push(p);
                log.debug('request queue resolved \'%s\' (%s in queue)', id, request.promises.length);
                return p;
            }
        });
    }
    /**
     * This method wraps the limiter scheduler.
     *
     * This is primarily used for POST, PUT, PATCH, DELETE requests
     *
     * NOTE(review): with Bottleneck's schedule(fn, ...args) overload, 'options'
     * here is passed straight through as the first ARGUMENT to 'action' — it is
     * NOT treated as a job description like loaderJob in schedule() above.
     * Confirm callers rely on this before changing it.
     */
    submit(action, options) {
        return this._limiter.schedule(action, options);
    }
    /**
     * Stop all current and pending requests and reset all queues.
     */
    clearAll() {
        return __awaiter(this, void 0, void 0, function* () {
            const { RECEIVED, RUNNING, EXECUTING, QUEUED } = this._limiter.counts();
            const itemsQueued = RECEIVED + QUEUED + RUNNING + EXECUTING;
            if (itemsQueued === 0) {
                log.debug('no requests to clear');
                return;
            }
            log.debug('aborting all request (%s in queue)', itemsQueued);
            try {
                // this will abort any xhr requests
                yield this._limiter.stop();
            }
            catch (e) {
                // include the cause — previously the error detail was silently dropped
                log.warn('stopping loader failure: %s', e);
            }
            finally {
                // A stopped Bottleneck cannot be restarted, so ALWAYS replace it —
                // even when stop() rejects (previously a failed stop() left the
                // loader permanently stopped) — and re-attach the logging handlers.
                // unfortunately, we still need one! TODO: ask library for update to be able to clear queues and keep running
                this._limiter = BottleneckLoader.limiterFactory(this._currentOptions);
                this._registerEvents();
            }
        });
    }
    /**
     * Look up the in-flight entry for an id (undefined when nothing is queued).
     */
    getRequest(id) {
        return this.requests.get(id);
    }
}
/** Shared singleton loader instance used across the application. */
export const bottleneckLoader = new BottleneckLoader();
//# sourceMappingURL=bottleneckLoader.js.map