// smc-hub
// Version:
// CoCalc: Backend webserver component
// 1,345 lines (1,295 loc) • 78.1 kB
// JavaScript
// Generated by CoffeeScript 2.5.1
(function() {
//########################################################################
// This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
// License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
//########################################################################
// PostgreSQL -- basic queries and database interface
var DEFAULT_STATEMENT_TIMEOUT_S, DEFAULT_TIMEOUS_MS, DEFAULT_TIMEOUT_DELAY_MS, EventEmitter, LRU, NEEDS_QUOTING, QUERY_ALERT_THRESH_MS, SCHEMA, all_results, async, callback2, client_db, count_result, defaults, do_query_with_pg_params, escapeString, expire_time, fs, misc, misc_node, one_result, pg, pg_type, quote_field, read_db_password_from_disk, ref, required, validator, winston,
// CoffeeScript emits this guard into every bound method; it throws if a
// method is somehow invoked before the constructor has bound it.
boundMethodCheck = function(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new Error('Bound instance method accessed before binding'); } },
indexOf = [].indexOf;
exports.DEBUG = true;
// If database conection is non-responsive but no error raised directly
// by db client, then we will know and fix, rather than just sitting there...
// NOTE(review): "TIMEOUS" is a typo for "TIMEOUT", kept as-is since the
// name is referenced elsewhere in this generated file.
DEFAULT_TIMEOUS_MS = 60000;
// Do not test for non-responsiveness until a while after initial connection
// established, since things tend to work initially, *but* may also be much
// slower, due to tons of clients simultaneously connecting to DB.
DEFAULT_TIMEOUT_DELAY_MS = DEFAULT_TIMEOUS_MS * 4;
QUERY_ALERT_THRESH_MS = 5000;
// this is a limit for each query, unless timeout_s is specified.
// https://postgresqlco.nf/en/doc/param/statement_timeout/
DEFAULT_STATEMENT_TIMEOUT_S = 30;
EventEmitter = require('events');
fs = require('fs');
async = require('async');
escapeString = require('sql-string-escape');
validator = require('validator');
({callback2} = require('smc-util/async-utils'));
LRU = require('lru-cache');
pg = require('pg').native; // You might have to do: "apt-get install libpq5 libpq-dev"
if (pg == null) {
throw Error("YOU MUST INSTALL the pg-native npm module");
}
// You can uncommment this to use the pure javascript driver.
// However: (1) it can be 5x slower or more!
// (2) I think it corrupts something somehow in a subtle way, since our whole
// syncstring system was breaking... until I switched to native. Not sure.
//pg = require('pg')
winston = require('./logger').getLogger('postgres');
({do_query_with_pg_params} = require('./postgres/set-pg-params'));
misc_node = require('smc-util-node/misc_node');
({defaults} = misc = require('smc-util/misc'));
required = defaults.required;
({SCHEMA, client_db} = require('smc-util/schema'));
exports.PUBLIC_PROJECT_COLUMNS = ['project_id', 'last_edited', 'title', 'description', 'deleted', 'created', 'env'];
exports.PROJECT_COLUMNS = ['users'].concat(exports.PUBLIC_PROJECT_COLUMNS);
({read_db_password_from_disk} = require('./utils'));
ref = exports.PostgreSQL = class PostgreSQL extends EventEmitter { // emits a 'connect' event whenever we successfully connect to the database and 'disconnect' when connection to postgres fails
// Create the PostgreSQL interface.  Binds every method (CoffeeScript
// bound-method style), applies option defaults (env vars PGHOST/SMC_DB/
// PGUSER override the built-ins), parses "host[:port]" configuration,
// reads the database password (from opts or from disk), sets up the
// optional short-lived LRU query cache and prometheus metrics, and --
// unless opts.connect is false -- starts connecting immediately.
constructor(opts) {
var dbg, i, ref1, ref2, ref3, ref4;
super();
this.clear_cache = this.clear_cache.bind(this);
this.close = this.close.bind(this);
/*
If @_timeout_ms is set, then we periodically do a simple test query,
to ensure that the database connection is working and responding to queries.
If the query below times out, then the connection will get recreated.
*/
this._do_test_query = this._do_test_query.bind(this);
this._init_test_query = this._init_test_query.bind(this);
this._close_test_query = this._close_test_query.bind(this);
this.connect = this.connect.bind(this);
this.disconnect = this.disconnect.bind(this);
this.is_connected = this.is_connected.bind(this);
this._connect = this._connect.bind(this);
// Return a native pg client connection. This will
// round robbin through all connections. It returns
// undefined if there are no connections.
this._client = this._client.bind(this);
this._dbg = this._dbg.bind(this);
this._init_metrics = this._init_metrics.bind(this);
this.async_query = this.async_query.bind(this);
this._query = this._query.bind(this);
this._query_retry_until_success = this._query_retry_until_success.bind(this);
this.__do_query = this.__do_query.bind(this);
// Special case of query for counting entries in a table.
this._count = this._count.bind(this);
this._validate_opts = this._validate_opts.bind(this);
this._ensure_database_exists = this._ensure_database_exists.bind(this);
this._confirm_delete = this._confirm_delete.bind(this);
this.set_random_password = this.set_random_password.bind(this);
// This will fail if any other clients have db open.
// This function is very important for automated testing.
this.delete_entire_database = this.delete_entire_database.bind(this);
// Deletes all the contents of the tables in the database. It doesn't
// delete anything about the schema itself: indexes or tables.
this.delete_all = this.delete_all.bind(this);
// return list of tables in the database
this._get_tables = this._get_tables.bind(this);
// Return list of columns in a given table
this._get_columns = this._get_columns.bind(this);
this._primary_keys = this._primary_keys.bind(this);
// Return *the* primary key, assuming unique; otherwise raise an exception.
this._primary_key = this._primary_key.bind(this);
this._create_table = this._create_table.bind(this);
this._create_indexes_queries = this._create_indexes_queries.bind(this);
this._create_indexes = this._create_indexes.bind(this);
// Ensure that for the given table, the actual schema and indexes
// in the database matches the one defined in SCHEMA.
this._update_table_schema = this._update_table_schema.bind(this);
this._update_table_schema_columns = this._update_table_schema_columns.bind(this);
this._update_table_schema_indexes = this._update_table_schema_indexes.bind(this);
this._throttle = this._throttle.bind(this);
// Ensure that the actual schema in the database matches the one defined in SCHEMA.
// This creates the initial schema, adds new columns fine, and in a VERY LIMITED
// range of cases, *might be* be able to change the data type of a column.
this.update_schema = this.update_schema.bind(this);
// Return the number of outstanding concurrent queries.
this.concurrent = this.concurrent.bind(this);
this.is_heavily_loaded = this.is_heavily_loaded.bind(this);
// Go through every table in the schema with a column called "expire", and
// delete every entry where expire is <= right now.
this.delete_expired = this.delete_expired.bind(this);
// count number of entries in a table
this.count = this.count.bind(this);
// sanitize strings before inserting them into a query string
this.sanitize = this.sanitize.bind(this);
opts = defaults(opts, {
host: (ref1 = process.env['PGHOST']) != null ? ref1 : 'localhost', // or 'hostname:port' or 'host1,host2,...' (multiple hosts) -- TODO -- :port only works for one host.
database: (ref2 = process.env['SMC_DB']) != null ? ref2 : 'smc',
user: (ref3 = process.env['PGUSER']) != null ? ref3 : 'smc',
debug: exports.DEBUG,
connect: true,
password: void 0,
cache_expiry: 5000, // expire cached queries after this many milliseconds
// keep this very short; it's just meant to reduce impact of a bunch of
// identical permission checks in a single user query.
cache_size: 300, // cache this many queries; use @_query(cache:true, ...) to cache result
concurrent_warn: 500,
concurrent_heavily_loaded: 70, // when concurrent hits this, consider load "heavy"; this changes home some queries behave to be faster but provide less info
ensure_exists: true, // ensure database exists on startup (runs psql in a shell)
timeout_ms: DEFAULT_TIMEOUS_MS, // **IMPORTANT: if *any* query takes this long, entire connection is terminated and recreated!**
timeout_delay_ms: DEFAULT_TIMEOUT_DELAY_MS // Only reconnect on timeout this many ms after connect. Motivation: on initial startup queries may take much longer due to competition with other clients.
});
this.setMaxListeners(10000); // because of a potentially large number of changefeeds
this._state = 'init';
this._debug = opts.debug;
this._timeout_ms = opts.timeout_ms;
this._timeout_delay_ms = opts.timeout_delay_ms;
this._ensure_exists = opts.ensure_exists;
this._init_test_query();
dbg = this._dbg("constructor"); // must be after setting @_debug above
dbg(opts);
// Split "host:port"; with multiple comma-separated hosts only the first
// ':' matters, so a port spec only works for a single host (see TODO above).
i = opts.host.indexOf(':');
if (i !== -1) {
this._host = opts.host.slice(0, i);
this._port = parseInt(opts.host.slice(i + 1));
} else {
this._host = opts.host;
this._port = 5432;
}
this._concurrent_warn = opts.concurrent_warn;
this._concurrent_heavily_loaded = opts.concurrent_heavily_loaded;
this._user = opts.user;
this._database = opts.database;
this._password = (ref4 = opts.password) != null ? ref4 : read_db_password_from_disk();
this._init_metrics();
// Cache is enabled only when both expiry and size are nonzero.
if (opts.cache_expiry && opts.cache_size) {
this._query_cache = new LRU({
max: opts.cache_size,
maxAge: opts.cache_expiry
});
}
if (opts.connect) {
this.connect(); // start trying to connect
}
}
clear_cache() {
var ref1;
boundMethodCheck(this, ref);
return (ref1 = this._query_cache) != null ? ref1.reset() : void 0;
}
close() {
var client, j, len, ref1;
boundMethodCheck(this, ref);
if (this._state === 'closed') { // nothing to do
return;
}
this._close_test_query();
this._state = 'closed';
this.emit('close');
this.removeAllListeners();
if (this._clients != null) {
ref1 = this._clients;
for (j = 0, len = ref1.length; j < len; j++) {
client = ref1[j];
client.removeAllListeners();
client.end();
}
return delete this._clients;
}
}
_do_test_query() {
var dbg;
boundMethodCheck(this, ref);
dbg = this._dbg('test_query');
dbg('starting');
return this._query({
query: 'SELECT NOW()',
cb: (err, result) => {
return dbg("finished", err, result);
}
});
}
_init_test_query() {
boundMethodCheck(this, ref);
if (!this._timeout_ms) {
return;
}
return this._test_query = setInterval(this._do_test_query, this._timeout_ms);
}
_close_test_query() {
boundMethodCheck(this, ref);
if (this._test_query != null) {
clearInterval(this._test_query);
return delete this._test_query;
}
}
// Return the name of the underlying database engine.
engine() {
return 'postgresql';
}
// Connect to the database, retrying with exponential backoff until
// success (or until opts.max_time elapses, if given).  Concurrent
// connect() calls are coalesced: while one attempt is in flight, later
// callers' cbs are queued on @_connecting and all invoked together.
// On success sets @_state='connected' and emits 'connect'.
connect(opts) {
var dbg;
boundMethodCheck(this, ref);
opts = defaults(opts, {
max_time: void 0, // set to something shorter to not try forever
// Only first max_time is used.
cb: void 0
});
if (this._state === 'closed') {
if (typeof opts.cb === "function") {
opts.cb("closed");
}
return;
}
dbg = this._dbg("connect");
if (this._clients != null) {
dbg("already connected");
if (typeof opts.cb === "function") {
opts.cb();
}
return;
}
// An attempt is already in flight -- just queue this caller's cb.
if (this._connecting != null) {
dbg('already trying to connect');
this._connecting.push(opts.cb);
return;
}
dbg('will try to connect');
this._state = 'init';
if (opts.max_time) {
dbg(`for up to ${opts.max_time}ms`);
} else {
dbg("until successful");
}
this._connecting = [opts.cb];
return misc.retry_until_success({
f: this._connect,
max_delay: 10000,
max_time: opts.max_time,
start_delay: 500 + 500 * Math.random(),
log: dbg,
cb: (err) => {
var cb, j, len, v;
// Fire every queued callback with the final outcome.
v = this._connecting;
delete this._connecting;
for (j = 0, len = v.length; j < len; j++) {
cb = v[j];
if (typeof cb === "function") {
cb(err);
}
}
if (!err) {
this._state = 'connected';
return this.emit('connect');
}
}
});
}
disconnect() {
var client, j, len, ref1;
boundMethodCheck(this, ref);
if (this._clients != null) {
ref1 = this._clients;
for (j = 0, len = ref1.length; j < len; j++) {
client = ref1[j];
client.end();
client.removeAllListeners();
}
}
return delete this._clients;
}
is_connected() {
boundMethodCheck(this, ref);
return (this._clients != null) && this._clients.length > 0;
}
_connect(cb) {
var dbg, locals;
boundMethodCheck(this, ref);
dbg = this._dbg("_do_connect");
dbg(`connect to ${this._host}`);
this._clear_listening_state(); // definitely not listening
if (this._clients != null) {
this.disconnect();
}
locals = {
clients: [],
hosts: []
};
this._connect_time = 0;
this._concurrent_queries = 0; // can't be any going on now.
return async.series([
(cb) => {
if (this._ensure_exists) {
dbg("first make sure db exists");
return this._ensure_database_exists(cb);
} else {
dbg("assuming database exists");
return cb();
}
},
(cb) => {
var f;
if (!this._host) { // undefined if @_host=''
locals.hosts = [void 0];
cb();
return;
}
if (this._host.indexOf('/') !== -1) {
dbg("using a local socket file (not a hostname)");
locals.hosts = [this._host];
cb();
return;
}
f = (host,
cb) => {
var hostname;
hostname = host.split(':')[0];
winston.debug(`Looking up ip addresses of ${hostname}`);
return require('dns').lookup(hostname,
{
all: true
},
(err,
ips) => {
var j,
len,
x;
if (err) {
winston.debug(`Got ${hostname} --> err=${err}`);
// NON-FATAL -- we just don't include these and hope to
// have at least one total working host...
return cb();
} else {
winston.debug(`Got ${hostname} --> ${JSON.stringify(ips)}`);
// In kubernetes the stateful set service just has
// lots of ip address. We connect to *all* of them,
// and spread queries across them equally.
for (j = 0, len = ips.length; j < len; j++) {
x = ips[j];
locals.hosts.push(x.address);
}
return cb();
}
});
};
return async.map(this._host.split(','),
f,
cb);
},
async(cb) => {
var c,
f,
host,
init_client,
j,
len,
ref1;
dbg(`connecting to ${JSON.stringify(locals.hosts)}...`);
if (locals.hosts.length === 0) {
dbg("locals.hosts has length 0 -- no available db");
cb("no databases available");
return;
}
dbg("create client and start connecting...");
locals.clients = [];
// Use a function to initialize the client, to avoid any
// issues with scope of "client" below.
init_client = (host) => {
var client;
client = new pg.Client({
user: this._user,
host: host,
port: this._port,
password: this._password,
database: this._database
});
if (this._notification != null) {
client.on('notification',
this._notification);
}
client.on('error',
(err) => {
if (this._state === 'init') {
return;
}
// already started connecting
this.emit('disconnect');
dbg(`error -- ${err}`);
this.disconnect();
return this.connect(); // start trying to reconnect
});
client.setMaxListeners(1000); // there is one emitter for each concurrent query... (see query_cb)
return locals.clients.push(client);
};
ref1 = locals.hosts;
for (j = 0, len = ref1.length; j < len; j++) {
host = ref1[j];
init_client(host);
}
// Connect the clients. If at least one succeeds, we use this.
// If none succeed, we declare failure.
// Obviously, this is NOT optimal -- it's just hopefully sufficiently robust/works.
// I'm going to redo this with experience.
locals.clients_that_worked = [];
locals.errors = [];
f = async(c) => {
var err;
try {
await c.connect();
return locals.clients_that_worked.push(c);
} catch (error) {
err = error;
return locals.errors.push(err);
}
};
await Promise.all((function() {
var l,
len1,
ref2,
results;
ref2 = locals.clients;
results = [];
for (l = 0, len1 = ref2.length; l < len1; l++) {
c = ref2[l];
results.push(f(c));
}
return results;
})());
if (locals.clients_that_worked.length === 0) {
dbg("ALL clients failed",
locals.errors);
return cb("ALL clients failed to connect");
} else {
// take what we got
if (locals.clients.length === locals.clients_that_worked.length) {
dbg("ALL clients worked");
} else {
dbg(`ONLY ${locals.clients_that_worked.length} clients worked`);
}
locals.clients = locals.clients_that_worked;
return cb();
}
},
(cb) => {
var f;
this._connect_time = new Date();
locals.i = 0;
// Weird and unfortunate fact -- this query can and does **HANG** never returning
// in some edge cases. That's why we have to be paranoid about this entire _connect
// function...
f = (client,
cb) => {
var it_hung,
timeout;
it_hung = () => {
cb("hung");
return cb = void 0;
};
timeout = setTimeout(it_hung,
15000);
dbg(`now connected; checking if we can actually query the DB via client ${locals.i}`);
locals.i += 1;
return client.query("SELECT NOW()",
(err) => {
clearTimeout(timeout);
return cb(err);
});
};
return async.map(locals.clients,
f,
cb);
},
(cb) => {
var f;
// we set a statement_timeout, to avoid queries locking up PG
f = (client,
cb) => {
var statement_timeout_ms;
statement_timeout_ms = DEFAULT_STATEMENT_TIMEOUT_S * 1000; // in millisecs
return client.query(`SET statement_timeout TO ${statement_timeout_ms}`,
(err) => {
return cb(err);
});
};
return async.map(locals.clients,
f,
cb);
},
(cb) => {
var f;
dbg("checking if ANY db server is in recovery, i.e., we are doing standby queries only");
this.is_standby = false;
f = (client,
cb) => {
// Is this a read/write or read-only connection?
return client.query("SELECT pg_is_in_recovery()",
(err,
resp) => {
if (err) {
return cb(err);
} else {
// True if ANY db connection is read only.
if (resp.rows[0].pg_is_in_recovery) {
this.is_standby = true;
}
return cb();
}
});
};
return async.map(locals.clients,
f,
cb);
}
], (err) => {
var mesg;
if (err) {
mesg = `Failed to connect to database -- ${err}`;
dbg(mesg);
console.warn(mesg); // make it clear for interactive users with debugging off -- common mistake with env not setup right.
return typeof cb === "function" ? cb(err) : void 0;
} else {
this._clients = locals.clients;
this._concurrent_queries = 0;
dbg("connected!");
return typeof cb === "function" ? cb(void 0, this) : void 0;
}
});
}
_client() {
boundMethodCheck(this, ref);
if (this._clients == null) {
return;
}
if (this._clients.length <= 1) {
return this._clients[0];
}
if (this._client_index == null) {
this._client_index = -1;
}
this._client_index = this._client_index + 1;
if (this._client_index >= this._clients.length) {
this._client_index = 0;
}
return this._clients[this._client_index];
}
_dbg(f) {
boundMethodCheck(this, ref);
if (this._debug) {
return (m) => {
return winston.debug(`PostgreSQL.${f}: ${misc.trunc_middle(JSON.stringify(m), 250)}`);
};
} else {
return function() {};
}
}
_init_metrics() {
var MetricsRecorder;
boundMethodCheck(this, ref);
// initialize metrics
MetricsRecorder = require('./metrics-recorder');
this.query_time_histogram = MetricsRecorder.new_histogram('db_query_ms_histogram', 'db queries', {
buckets: [1, 5, 10, 20, 50, 100, 200, 500, 1000, 5000, 10000],
labels: ['table']
});
return this.concurrent_counter = MetricsRecorder.new_counter('db_concurrent_total', 'Concurrent queries (started and finished)', ['state']);
}
async async_query(opts) {
boundMethodCheck(this, ref);
return (await callback2(this._query.bind(this), opts));
}
// Main callback-based query entry point.  Either opts.query is given
// directly, or it is built later from opts.table/opts.select.  This
// method handles: rejecting write-style queries on a standby, the
// retry_until_success wrapper, and auto-connecting first if needed;
// the actual SQL assembly and execution happen in @__do_query.
_query(opts) {
var dbg;
boundMethodCheck(this, ref);
opts = defaults(opts, {
query: void 0, // can give select and table instead
select: void 0, // if given, should be string or array of column names -| can give these
table: void 0, // if given, name of table -| two instead of query
params: [],
cache: false, // Will cache results for a few seconds or use cache. Use this
// when speed is very important, and results that are a few seconds
// out of date are fine.
where: void 0, // Used for SELECT: If given, can be
// - a map with keys clauses with $::TYPE (not $1::TYPE!) and values
// the corresponding params. Also, WHERE must not be in the query already.
// If where[cond] is undefined, then cond is completely **ignored**.
// - a string, which is inserted as is as a normal WHERE condition.
// - an array of maps or strings.
set: void 0, // Appends a SET clause to the query; same format as values.
values: void 0, // Used for INSERT: If given, then params and where must not be given. Values is a map
// {'field1::type1':value, , 'field2::type2':value2, ...} which gets converted to
// ' (field1, field2, ...) VALUES ($1::type1, $2::type2, ...) '
// with corresponding params set. Undefined valued fields are ignored and types may be omited.
conflict: void 0, // If given, then values must also be given; appends this to query:
// ON CONFLICT (name) DO UPDATE SET value=EXCLUDED.value'
// Or, if conflict starts with "ON CONFLICT", then just include as is, e.g.,
// "ON CONFLICT DO NOTHING"
jsonb_set: void 0, // Used for setting a field that contains a JSONB javascript map.
// Give as input an object
// { field1:{key1:val1, key2:val2, ...}, field2:{key3:val3,...}, ...}
// In each field, every key has the corresponding value set, unless val is undefined/null, in which
// case that key is deleted from the JSONB object fieldi. Simple as that! This is much, much
// cleaner to use than SQL. Also, if the value in fieldi itself is NULL, it gets
// created automatically.
jsonb_merge: void 0, // Exactly like jsonb_set, but when val1 (say) is an object, it merges that object in,
// *instead of* setting field1[key1]=val1. So after this field1[key1] has what was in it
// and also what is in val1. Obviously field1[key1] had better have been an array or NULL.
order_by: void 0,
limit: void 0,
offset: void 0,
safety_check: true,
retry_until_success: void 0, // if given, should be options to misc.retry_until_success
pg_params: void 0, // key/value map of postgres parameters, which will be set for the query in a single transaction
timeout_s: void 0, // by default, there is a "statement_timeout" set. set to 0 to disable or a number in seconds
cb: void 0
});
// quick check for write query against read-only connection
if (this.is_standby && ((opts.set != null) || (opts.jsonb_set != null) || (opts.jsonb_merge != null))) {
if (typeof opts.cb === "function") {
opts.cb("set queries against standby not allowed");
}
return;
}
// Delegate to the retry wrapper, which re-enters _query without the
// retry_until_success option.
if (opts.retry_until_success) {
this._query_retry_until_success(opts);
return;
}
if (!this.is_connected()) {
dbg = this._dbg("_query");
dbg("connecting first...");
return this.connect({
max_time: 45000, // don't try forever; queries could pile up.
cb: (err) => {
if (err) {
dbg(`FAILED to connect -- ${err}`);
return typeof opts.cb === "function" ? opts.cb("database is down (please try later)") : void 0;
} else {
dbg("connected, now doing query");
return this.__do_query(opts);
}
}
});
} else {
return this.__do_query(opts);
}
}
_query_retry_until_success(opts) {
var args, f, orig_cb, retry_opts;
boundMethodCheck(this, ref);
retry_opts = opts.retry_until_success;
orig_cb = opts.cb;
delete opts.retry_until_success;
// f just calls @_do_query, but with a different cb (same opts)
args = void 0;
f = (cb) => {
opts.cb = (...args0) => {
args = args0;
return cb(args[0]);
};
return this._query(opts);
};
retry_opts.f = f;
// When misc.retry_until_success finishes, it calls this, which just
// calls the original cb.
retry_opts.cb = (err) => {
if (err) {
return typeof orig_cb === "function" ? orig_cb(err) : void 0;
} else {
return typeof orig_cb === "function" ? orig_cb(...args) : void 0;
}
};
// OK, now start it attempting.
return misc.retry_until_success(retry_opts);
}
__do_query(opts) {
var SET, WHERE, client, conflict, data, dbg, e, err, error_listener, field, fields, fields_to_index, finished, full_query_string, j, k, l, len, len1, len2, n, o, p, param, push_param, push_where, query_cb, ref1, ref2, ref3, ref4, ref5, ref6, ref7, safety_check, set, start, timeout_error, timer, type, v, value, values, x, y;
boundMethodCheck(this, ref);
dbg = this._dbg(`_query('${misc.trunc((ref1 = opts.query) != null ? ref1.replace(/\n/g, " ") : void 0, 250)}',id='${misc.uuid().slice(0, 6)}')`);
if (!this.is_connected()) {
if (typeof opts.cb === "function") {
opts.cb("client not yet initialized");
}
return;
}
if ((opts.params != null) && !misc.is_array(opts.params)) {
if (typeof opts.cb === "function") {
opts.cb("params must be an array");
}
return;
}
if (opts.query == null) {
if (opts.table == null) {
if (typeof opts.cb === "function") {
opts.cb("if query not given, then table must be given");
}
return;
}
if (opts.select == null) {
opts.select = '*';
}
if (misc.is_array(opts.select)) {
opts.select = ((function() {
var j, len, ref2, results;
ref2 = opts.select;
results = [];
for (j = 0, len = ref2.length; j < len; j++) {
field = ref2[j];
results.push(quote_field(field));
}
return results;
})()).join(',');
}
opts.query = `SELECT ${opts.select} FROM \"${opts.table}\"`;
delete opts.select;
}
push_param = function(param, type) {
if ((type != null ? type.toUpperCase() : void 0) === 'JSONB') {
param = misc.to_json(param); // I don't understand why this is needed by the driver....
}
opts.params.push(param);
return opts.params.length;
};
if (opts.jsonb_merge != null) {
if (opts.jsonb_set != null) {
if (typeof opts.cb === "function") {
opts.cb("if jsonb_merge is set then jsonb_set must not be set");
}
return;
}
opts.jsonb_set = opts.jsonb_merge;
}
SET = [];
if (opts.jsonb_set != null) {
// This little piece of very hard to write (and clever?) code
// makes it so we can set or **merge in at any nested level** (!)
// arbitrary JSON objects. We can also delete any key at any
// level by making the value null or undefined! This is amazingly
// easy to use in queries -- basically making JSONP with postgres
// as expressive as RethinkDB REQL (even better in some ways).
set = (field, data, path) => {
var key, obj, subobj, val;
obj = `COALESCE(${field}#>'{${path.join(',')}}', '{}'::JSONB)`;
for (key in data) {
val = data[key];
if (val == null) {
// remove key from object
obj = `(${obj} - '${key}')`;
} else {
if ((opts.jsonb_merge != null) && (typeof val === 'object' && !misc.is_date(val))) {
subobj = set(field, val, path.concat([key]));
obj = `JSONB_SET(${obj}, '{${key}}', ${subobj})`;
} else {
// completely replace field[key] with val.
obj = `JSONB_SET(${obj}, '{${key}}', $${push_param(val, 'JSONB')}::JSONB)`;
}
}
}
return obj;
};
v = (function() {
var ref2, results;
ref2 = opts.jsonb_set;
results = [];
for (field in ref2) {
data = ref2[field];
results.push(`${field}=${set(field, data, [])}`);
}
return results;
})();
SET.push(...v);
}
if (opts.values != null) {
//dbg("values = #{misc.to_json(opts.values)}")
if (opts.where != null) {
if (typeof opts.cb === "function") {
opts.cb("where must not be defined if opts.values is defined");
}
return;
}
if (misc.is_array(opts.values)) {
// An array of numerous separate object that we will insert all at once.
// Determine the fields, which as the union of the keys of all values.
fields = {};
ref2 = opts.values;
for (j = 0, len = ref2.length; j < len; j++) {
x = ref2[j];
if (!misc.is_object(x)) {
if (typeof opts.cb === "function") {
opts.cb("if values is an array, every entry must be an object");
}
return;
}
for (k in x) {
p = x[k];
fields[k] = true;
}
}
// convert to array
fields = misc.keys(fields);
fields_to_index = {};
n = 0;
for (l = 0, len1 = fields.length; l < len1; l++) {
field = fields[l];
fields_to_index[field] = n;
n += 1;
}
values = [];
ref3 = opts.values;
for (o = 0, len2 = ref3.length; o < len2; o++) {
x = ref3[o];
value = [];
for (field in x) {
param = x[field];
if (field.indexOf('::') !== -1) {
[field, type] = field.split('::');
type = type.trim();
y = `$${push_param(param, type)}::${type}`;
} else {
y = `$${push_param(param)}`;
}
value[fields_to_index[field]] = y;
}
values.push(value);
}
} else {
// A single entry that we'll insert.
fields = [];
values = [];
ref4 = opts.values;
for (field in ref4) {
param = ref4[field];
if (param == null) { // ignore undefined fields -- makes code cleaner (and makes sense)
continue;
}
if (field.indexOf('::') !== -1) {
[field, type] = field.split('::');
fields.push(quote_field(field.trim()));
type = type.trim();
values.push(`$${push_param(param, type)}::${type}`);
continue;
} else {
fields.push(quote_field(field));
values.push(`$${push_param(param)}`);
}
}
values = [values]; // just one
}
if (values.length > 0) {
opts.query += ` (${((function() {
var len3, q, results;
results = [];
for (q = 0, len3 = fields.length; q < len3; q++) {
field = fields[q];
results.push(quote_field(field));
}
return results;
})()).join(',')}) VALUES ` + ((function() {
var len3, q, results;
results = [];
for (q = 0, len3 = values.length; q < len3; q++) {
value = values[q];
results.push(` (${value.join(',')}) `);
}
return results;
})()).join(',');
}
}
if (opts.set != null) {
v = [];
ref5 = opts.set;
for (field in ref5) {
param = ref5[field];
if (field.indexOf('::') !== -1) {
[field, type] = field.split('::');
type = type.trim();
v.push(`${quote_field(field.trim())}=$${push_param(param, type)}::${type}`);
continue;
} else {
v.push(`${quote_field(field.trim())}=$${push_param(param)}`);
}
}
if (v.length > 0) {
SET.push(...v);
}
}
if (opts.conflict != null) {
if (misc.is_string(opts.conflict) && misc.startswith(opts.conflict.toLowerCase().trim(), 'on conflict')) {
// Straight string inclusion
opts.query += ' ' + opts.conflict + ' ';
} else {
if (opts.values == null) {
if (typeof opts.cb === "function") {
opts.cb("if conflict is specified then values must also be specified");
}
return;
}
if (!misc.is_array(opts.conflict)) {
if (typeof opts.conflict !== 'string') {
if (typeof opts.cb === "function") {
opts.cb(`conflict (='${misc.to_json(opts.conflict)}') must be a string (the field name), for now`);
}
return;
} else {
conflict = [opts.conflict];
}
} else {
conflict = opts.conflict;
}
v = (function() {
var len3, q, results;
results = [];
for (q = 0, len3 = fields.length; q < len3; q++) {
field = fields[q];
if (indexOf.call(conflict, field) < 0) {
results.push(`${quote_field(field)}=EXCLUDED.${field}`);
}
}
return results;
})();
SET.push(...v);
if (SET.length === 0) {
opts.query += ` ON CONFLICT (${conflict.join(',')}) DO NOTHING `;
} else {
opts.query += ` ON CONFLICT (${conflict.join(',')}) DO UPDATE `;
}
}
}
if (SET.length > 0) {
opts.query += " SET " + SET.join(' , ');
}
WHERE = [];
push_where = (x) => {
var cond, len3, q, results;
if (typeof x === 'string') {
return WHERE.push(x);
} else if (misc.is_array(x)) {
results = [];
for (q = 0, len3 = x.length; q < len3; q++) {
v = x[q];
results.push(push_where(v));
}
return results;
} else if (misc.is_object(x)) {
for (cond in x) {
param = x[cond];
if (typeof cond !== 'string') {
if (typeof opts.cb === "function") {
opts.cb(`each condition must be a string but '${cond}' isn't`);
}
return;
}
if (param == null) { // *IGNORE* where conditions where value is explicitly undefined
continue;
}
if (cond.indexOf('$') === -1) {
// where condition is missing it's $ parameter -- default to equality
cond += " = $";
}
WHERE.push(cond.replace('$', `$${push_param(param)}`));
}
}
};
if (opts.where != null) {
push_where(opts.where);
}
if (WHERE.length > 0) {
if (opts.values != null) {
if (typeof opts.cb === "function") {
opts.cb("values must not be given if where clause given");
}
return;
}
opts.query += ` WHERE ${WHERE.join(' AND ')}`;
}
if (opts.order_by != null) {
if (opts.order_by.indexOf("'") >= 0) {
err = `ERROR -- detected ' apostrophe in order_by='${opts.order_by}'`;
dbg(err);
if (typeof opts.cb === "function") {
opts.cb(err);
}
return;
}
opts.query += ` ORDER BY ${opts.order_by} `;
}
if (opts.limit != null) {
if (!validator.isInt('' + opts.limit, {
min: 0
})) {
err = `ERROR -- opts.limit = '${opts.limit}' is not an integer`;
dbg(err);
if (typeof opts.cb === "function") {
opts.cb(err);
}
return;
}
opts.query += ` LIMIT ${opts.limit} `;
}
if (opts.offset != null) {
if (!validator.isInt('' + opts.offset, {
min: 0
})) {
err = `ERROR -- opts.offset = '${opts.offset}' is not an integer`;
dbg(err);
if (typeof opts.cb === "function") {
opts.cb(err);
}
return;
}
opts.query += ` OFFSET ${opts.offset} `;
}
if (opts.safety_check) {
safety_check = opts.query.toLowerCase();
if ((safety_check.indexOf('update') !== -1 || safety_check.indexOf('delete') !== -1) && (safety_check.indexOf('where') === -1 && safety_check.indexOf('trigger') === -1 && safety_check.indexOf('insert') === -1 && safety_check.indexOf('create') === -1)) {
// This is always a bug.
err = `ERROR -- Dangerous UPDATE or DELETE without a WHERE, TRIGGER, or INSERT: query='${opts.query}'`;
dbg(err);
if (typeof opts.cb === "function") {
opts.cb(err);
}
return;
}
}
if (opts.cache && (this._query_cache != null)) {
// check for cached result
full_query_string = JSON.stringify([opts.query, opts.params]);
if ((x = this._query_cache.get(full_query_string)) != null) {
dbg(`using cache for '${opts.query}'`);
if (typeof opts.cb === "function") {
opts.cb(...x);
}
return;
}
}
// params can easily be huge, e.g., a blob. But this may be
// needed at some point for debugging.
//dbg("query='#{opts.query}', params=#{misc.to_json(opts.params)}")
client = this._client();
if (client == null) {
if (typeof opts.cb === "function") {
opts.cb("not connected");
}
return;
}
if (this._concurrent_queries == null) {
this._concurrent_queries = 0;
}
this._concurrent_queries += 1;
dbg(`query='${opts.query} (concurrent=${this._concurrent_queries})'`);
if ((ref6 = this.concurrent_counter) != null) {
ref6.labels('started').inc(1);
}
try {
start = new Date();
if (this._timeout_ms && this._timeout_delay_ms) {
// Create a timer, so that if the query doesn't return within
// timeout_ms time, then the entire connection is destroyed.
// It then gets recreated automatically. I tested
// and all outstanding queries also get an error when this happens.
timeout_error = () => {
// Only disconnect with timeout error if it has been sufficiently long
// since connecting. This way when an error is triggered, all the
// outstanding timers at the moment of the error will just get ignored
// when they fire (since @_connect_time is 0 or too recent).
if (this._connect_time && new Date() - this._connect_time > this._timeout_delay_ms) {
return client.emit('error', 'timeout');
}
};
timer = setTimeout(timeout_error, this._timeout_ms);
}
// PAINFUL FACT: In client.query below, if the client is closed/killed/errored
// (especially via client.emit above), then none of the callbacks from
// client.query are called!
finished = false;
error_listener = function() {
dbg("error_listener fired");
return query_cb('error');
};
client.once('error', error_listener);
query_cb = (err, result) => {
var query_time_ms, ref7, ref8, ref9;
if (finished) { // ensure no matter what that query_cb is called at most once.
dbg("called when finished (ignoring)");
return;
}
finished = true;
client.removeListener('error', error_listener);
if (this._timeout_ms) {
clearTimeout(timer);
}
query_time_ms = new Date() - start;
this._concurrent_queries -= 1;
if ((ref7 = this.query_time_histogram) != null) {
ref7.observe({
table: (ref8 = opts.table) != null ? ref8 : ''
}, query_time_ms);
}
if ((ref9 = this.concurrent_counter) != null) {
ref9.labels('ended').inc(1);
}
if (err) {
dbg(`done (concurrent=${this._concurrent_queries}), (query_time_ms=${query_time_ms}) -- error: ${err}`);
err = 'postgresql ' + err;
} else {
dbg(`done (concurrent=${this._concurrent_queries}) (query_time_ms=${query_time_ms}) -- success`);
}
if (opts.cache && (this._query_cache != null)) {
this._query_cache.set(full_query_string, [err, result]);
}
if (typeof opts.cb === "function") {
opts.cb(err, result);
}
if (query_time_ms >= QUERY_ALERT_THRESH_MS) {
return dbg(`QUERY_ALERT_THRESH: query_time_ms=${query_time_ms}\nQUERY_ALERT_THRESH: query='${opts.query}'\nQUERY_ALERT_THRESH: params='${misc.to_json(opts.params)}'`);
}
};
if ((opts.timeout_s != null) && typeof opts.timeout_s === 'number' && opts.timeout_s >= 0) {
dbg(`set query timeout to ${opts.timeout_s}secs`);
if (opts.pg_params == null) {
opts.pg_params = {};
}
// the actual param is in milliseconds
// https://postgresqlco.nf/en/doc/param/statement_timeout/
opts.pg_params.statement_timeout = 1000 * opts.timeout_s;
}
if (opts.pg_params != null) {
dbg("run query with specific postgres parameters in a transaction");
do_query_with_pg_params({
client: client,
query: opts.query,
params: opts.params,
pg_params: opts.pg_params,
cb: query_cb
});
} else {
client.query(opts.query, opts.params, query_cb);
}
} catch (error) {
e = error;
// this should never ever happen
dbg(`EXCEPTION in client.query: ${e}`);
if (typeof opts.cb === "function") {
opts.cb(e);
}
this._concurrent_queries -= 1;
if ((ref7 = this.concurrent_counter) != null) {
ref7.labels('ended').inc(1);
}
}
}
_count(opts) {
boundMethodCheck(this, ref);
opts = defaults(opts, {
table: required,
where: void 0, // as in _query
cb: required
});
return this._query({
query: `SELECT COUNT(*) FROM ${opts.table}`,
where: opts.where,
cb: count_result(opts.cb)
});
}
_validate_opts(opts) {
var j, k, l, len, len1, v, w;
boundMethodCheck(this, ref);
for (k in opts) {
v = opts[k];
if (k.slice(k.length - 2) === 'id') {
if ((v != null) && !misc.is_valid_uuid_string(v)) {
if (typeof opts.cb === "function") {
opts.cb(`invalid ${k} -- ${v}`);
}
return false;
}
}
if (k.slice(k.length - 3) === 'ids') {
for (j = 0, len = v.length; j < len; j++) {
w = v[j];
if (!misc.is_valid_uuid_string(w)) {
if (typeof opts.cb === "function") {
opts.cb(`invalid uuid ${w} in ${k} -- ${misc.to_json(v)}`);
}
return false;
}
}
}
if (k === 'group' && indexOf.call(misc.PROJECT_GROUPS, v) < 0) {
if (typeof opts.cb === "function") {
opts.cb(`unknown project group '${v}'`);
}
return false;
}
if (k === 'groups') {
for (l = 0, len1 = v.length; l < len1; l++) {
w = v[l];
if (indexOf.call(misc.PROJECT_GROUPS, w) < 0) {
if (typeof opts.cb === "function") {
opts.cb(`unknown project group '${w}' in groups`);
}
return false;
}
}
}
}
return true;
}
_ensure_database_exists(cb) {
var args, dbg;
boundMethodCheck(this, ref);
dbg = this._dbg("_ensure_database_exists");
dbg(`ensure database '${this._database}' exists`);
args = ['--user', this._user, '--host', this._host.split(',')[0], '--port', this._port, '--list', '--tuples-only'];
dbg(`psql ${args.join(' ')}`);
return misc_node.execute_code({
command: 'psql',
args: args,
env: {
PGPASSWORD: this._password
},
cb: (err, output) => {
var databases, ref1, x;
if (err) {
cb(err);
return;
}
databases = (function() {
var j, len, ref1, results;
ref1 = output.stdout.split('\n');
results = [];
for (j = 0, len = ref1.length; j < len; j++) {
x = ref1[j];
if (x) {
results.push(x.split('|')[0].trim());
}
}
return results;
})();
if (ref1 = this._database, indexOf.call(databases, ref1) >= 0) {
dbg(`database '${this._database}' already exists`);
cb();
return;
}
dbg(`creating database '${this._database}'`);
return misc_node.execute_code({
command: 'createdb',
args: ['--host', this._host, '--port', this._port, this._database],
cb: cb
});
}
});
}
_confirm_delete(opts) {
var dbg, err;
boundMethodCheck(this, ref);
opts = defaults(opts, {
confirm: 'no',
cb: required
});
dbg = this._dbg("confirm");
if (opts.confirm !== 'yes') {
err = `Really delete all data? -- you must explicitly pass in confirm='yes' (but confirm:'${opts.confirm}')`;
dbg(err);
opts.cb(err);
return false;
} else {
return true;
}
}
set_random_password(opts) {
boundMethodCheck(this, ref);
throw Error("NotImplementedError");
}
delete_entire_database(opts) {
var dbg;