// smc-hub — CoCalc backend webserver component: postgres-user-query-queue.js
// (The five lines above this comment were a scraped page header — "smc-hub",
// version, description, size, language — not part of the original source;
// they are preserved here as a comment so the file remains valid JavaScript.)
// Generated by CoffeeScript 2.5.1
(function() {
//########################################################################
// This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
// License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
//########################################################################
/*
User query queue.
The point of this is to make it so:
(1) there is a limit on the number of simultaneous queries that a single connected client
can make to the database, and
(2) when the client disconnects, any outstanding (not started) queries are cancelled, and
(3) queries that don't even start until a certain amount of time after they were made are
automatically considered to have failed (so the client retries).
*/
// Module-scope declarations (CoffeeScript compiler hoists all vars to the top).
var GLOBAL_LIMIT, MAX_QUEUE_SIZE, TIME_HISTORY_LENGTH, USER_QUERY_LIMIT, USER_QUERY_TIMEOUT_MS, defaults, global_count, metrics_recorder, misc, query_queue_done, query_queue_duration, query_queue_exec, required;
({defaults} = misc = require('smc-util/misc'));
required = defaults.required;
// We do at most this many user queries **at once** to the database on behalf
// of each connected client. This only applies when the global limit
// (GLOBAL_LIMIT below) has been exceeded.
USER_QUERY_LIMIT = 10;
// If we don't even *start* a query within this many milliseconds of receiving
// it, then we consider it failed (so the client retries).
USER_QUERY_TIMEOUT_MS = 15000;
// How many recent query times to save for each client.
// This is currently not used for anything except logging
// (the average of the last 10 is reported by _info).
TIME_HISTORY_LENGTH = 100;
// Do not throttle queries at all unless there are at least this
// many global outstanding concurrent **user queries**. The point is that
// if there's very little load, we should get queries done as fast
// as possible for users.
GLOBAL_LIMIT = 250;
// Maximum queue size -- if user tries to do more queries than this
// at once, then all old ones return an error. They could then retry.
MAX_QUEUE_SIZE = 150; // client isn't supposed to send more than around 25-50 at once.
// setup Prometheus-style metrics (project-local metrics-recorder wrapper)
metrics_recorder = require('./metrics-recorder');
query_queue_exec = metrics_recorder.new_counter('query_queue_executed_total', 'Executed queries and their status', ['status']);
query_queue_duration = metrics_recorder.new_counter('query_queue_duration_seconds_total', 'Total time it took to evaluate queries');
query_queue_done = metrics_recorder.new_counter('query_queue_done_total', 'Total number of evaluated queries');
//query_queue_info = metrics_recorder.new_gauge('query_queue_info', 'Information update about outstanding queries in the queue', ['client', 'info'])
// Number of queries currently outstanding across *all* clients (shared by
// every UserQueryQueue instance in this process).
global_count = 0;
exports.UserQueryQueue = class UserQueryQueue {
constructor(opts) {
this.destroy = this.destroy.bind(this);
this.cancel_user_queries = this.cancel_user_queries.bind(this);
this.user_query = this.user_query.bind(this);
this._do_one_query = this._do_one_query.bind(this);
this._update = this._update.bind(this);
this._discard_next_call = this._discard_next_call.bind(this);
this._process_next_call = this._process_next_call.bind(this);
this._avg = this._avg.bind(this);
this._info = this._info.bind(this);
opts = defaults(opts, {
do_query: required,
dbg: required,
limit: USER_QUERY_LIMIT,
timeout_ms: USER_QUERY_TIMEOUT_MS,
global_limit: GLOBAL_LIMIT,
concurrent: required
});
this._do_query = opts.do_query;
this._limit = opts.limit;
this._dbg = opts.dbg;
this._timeout_ms = opts.timeout_ms;
this._global_limit = opts.global_limit;
this._state = {};
this._concurrent = opts.concurrent;
}
destroy() {
delete this._do_query;
delete this._limit;
delete this._timeout_ms;
delete this._dbg;
delete this._state;
return delete this._global_limit;
}
cancel_user_queries(opts) {
var ref, state;
opts = defaults(opts, {
client_id: required
});
state = this._state[opts.client_id];
this._dbg(`cancel_user_queries(client_id='${opts.client_id}') -- discarding ${state != null ? (ref = state.queue) != null ? ref.length : void 0 : void 0}`);
if (state != null) {
delete state.queue; // so we will stop trying to do queries for this client
return delete this._state[opts.client_id];
}
}
user_query(opts) {
var client_id, state;
opts = defaults(opts, {
client_id: required,
priority: void 0, // (NOT IMPLEMENTED) priority for this query
// (an integer [-10,...,19] like in UNIX)
account_id: void 0,
project_id: void 0,
query: required,
options: [],
changes: void 0,
cb: void 0
});
client_id = opts.client_id;
this._dbg(`user_query(client_id='${client_id}')`);
state = this._state[client_id];
if (state == null) {
state = this._state[client_id] = {
client_id: client_id,
queue: [], // queries in the queue
count: 0, // number of queries currently outstanding (waiting for these to finish)
sent: 0, // total number of queries sent to database
time_ms: [] // how long recent queries took in ms times_ms[times_ms.length-1] is most recent
};
}
opts.time = new Date();
state.queue.push(opts);
state.sent += 1;
return this._update(state);
}
_do_one_query(opts, cb) {
var client_id, id, orig_cb, tm;
if (new Date() - opts.time >= this._timeout_ms) {
this._dbg("_do_one_query -- timed out");
if (typeof opts.cb === "function") {
opts.cb("timeout");
}
cb();
query_queue_exec.labels('timeout').inc();
return;
}
id = misc.uuid().slice(0, 6);
tm = new Date();
client_id = opts.client_id;
this._dbg(`_do_one_query(client_id='${client_id}', query_id='${id}') -- doing the query`);
// Actually do the query
orig_cb = opts.cb;
// Remove the two properties from opts that @_do_query doesn't take
// as inputs, and of course we do not need anymore.
delete opts.time; // no longer matters
delete opts.client_id;
delete opts.priority;
// Set a cb that calls our cb exactly once, but sends anything
// it receives to the orig_cb, if there is one.
opts.cb = (err, result) => {
if (cb != null) {
this._dbg(`_do_one_query(client_id='${client_id}', query_id='${id}') -- done; time=${new Date() - tm}ms`);
cb();
cb = void 0;
}
if ((result != null ? result.action : void 0) === 'close' || err) {
// I think this is necessary for this closure to ever
// get garbage collected. **Not tested, and this could be bad.**
delete opts.cb;
}
return typeof orig_cb === "function" ? orig_cb(err, result) : void 0;
};
// Increment counter
query_queue_exec.labels('sent').inc();
// Finally, do the query.
return this._do_query(opts);
}
_update(state) {
var results;
if ((state.queue == null) || state.queue.length === 0) {
return;
}
// Discard all additional messages beyond outstanding and in queue. The client is
// assumed to be smart enough to try again.
while (state.queue.length + state.count > MAX_QUEUE_SIZE) {
this._discard_next_call(state);
}
results = [];
// Now handle the remaining messages up to the limit.
while (state.queue.length > 0 && (this._concurrent() < this._global_limit || state.count < this._limit)) {
results.push(this._process_next_call(state));
}
return results;
}
_discard_next_call(state) {
var opts;
if ((state.queue == null) || state.queue.length === 0) {
return;
}
this._dbg(`_discard_next_call -- discarding (queue size=${state.queue.length})`);
opts = state.queue.shift();
opts.cb("discarded");
return this._info(state);
}
_process_next_call(state) {
var opts, tm;
if ((state.queue == null) || state.queue.length === 0) {
return;
}
state.count += 1;
global_count += 1;
opts = state.queue.shift();
this._info(state);
tm = new Date();
return this._do_one_query(opts, () => {
var duration_ms;
state.count -= 1;
global_count -= 1;
duration_ms = new Date() - tm;
state.time_ms.push(duration_ms);
query_queue_duration.inc(duration_ms / 1000);
query_queue_done.inc(1);
if (state.time_ms.length > TIME_HISTORY_LENGTH) {
state.time_ms.shift();
}
this._info(state);
return this._update(state);
});
}
_avg(state) {
var a, i, len, s, v;
// recent average time
v = state.time_ms.slice(state.time_ms.length - 10);
if (v.length === 0) {
return 0;
}
s = 0;
for (i = 0, len = v.length; i < len; i++) {
a = v[i];
s += a;
}
return s / v.length;
}
_info(state) {
var avg, ref;
avg = this._avg(state);
//query_queue_info.labels(state.client_id, 'count').set(state.count)
//query_queue_info.labels(state.client_id, 'avg').set(avg)
//query_queue_info.labels(state.client_id, 'length').set(state.queue?.length ? 0)
//query_queue_info.labels(state.client_id, 'sent').set(state.sent)
return this._dbg(`client_id='${state.client_id}': avg=${avg}ms, count(local=${state.count},global=${global_count}), queued.length=${(ref = state.queue) != null ? ref.length : void 0}, sent=${state.sent}`);
}
};
}).call(this);
//# sourceMappingURL=postgres-user-query-queue.js.map