// smc-hub -- CoCalc: Backend webserver component
// (source-listing metadata: 274 lines (255 loc), 8.98 kB, JavaScript)
// Generated by CoffeeScript 2.5.1
(function() {
//########################################################################
// This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
// License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
//########################################################################
/*
PostgreSQL -- operations code, e.g., backups, maintenance, etc.
COPYRIGHT : (c) 2017 SageMath, Inc.
LICENSE : AGPLv3
*/
var SCHEMA, async, defaults, fs, misc, misc_node, required,
boundMethodCheck = function(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new Error('Bound instance method accessed before binding'); } },
indexOf = [].indexOf;
fs = require('fs');
async = require('async');
misc_node = require('smc-util-node/misc_node');
({defaults} = misc = require('smc-util/misc'));
required = defaults.required;
({SCHEMA} = require('smc-util/schema'));
exports.extend_PostgreSQL = function(ext) {
var PostgreSQL;
return PostgreSQL = class PostgreSQL extends ext {
constructor() {
super(...arguments);
// Backups up the indicated tables.
// WARNING: This is NOT guaranteed to give a point
// in time backup of the entire database across tables!
// The backup of each table is only consistent within that
// table. For CoCalc, this tends to be fine, due to our design.
// The advantage of this is that we can backup huge tables
// only once a week, and other smaller tables much more frequently.
// For tables:
// - a list of tables
// - 'all' (the string) -- backs up everything in the SMC schema (not the database!)
// - 'critical' -- backs up only smaller critical tables, which we would desparately
// need for disaster recovery
this.backup_tables = this.backup_tables.bind(this);
this._backup_table = this._backup_table.bind(this);
this._backup_bup = this._backup_bup.bind(this);
this._get_backup_tables = this._get_backup_tables.bind(this);
// Restore the given tables from the backup in the given directory.
this.restore_tables = this.restore_tables.bind(this);
this._restore_table = this._restore_table.bind(this);
}
backup_tables(opts) {
var dbg, tables;
boundMethodCheck(this, PostgreSQL);
opts = defaults(opts, {
tables: required, // list of tables, 'all' or 'critical'
path: 'backup',
limit: 3, // number of tables to backup in parallel
bup: true, // creates/updates a bup archive in backup/.bup,
// so we have snapshots of all past backups!
cb: required
});
tables = this._get_backup_tables(opts.tables);
dbg = this._dbg("backup_tables()");
dbg(`backing up tables: ${misc.to_json(tables)}`);
return async.series([
(cb) => {
var backup;
backup = (table,
cb) => {
dbg(`backup '${table}'`);
return this._backup_table({
table: table,
path: opts.path,
cb: cb
});
};
return async.mapLimit(tables,
opts.limit,
backup,
cb);
},
(cb) => {
return this._backup_bup({
path: opts.path,
cb: cb
});
}
], (err) => {
return opts.cb(err);
});
}
_backup_table(opts) {
var cmd, dbg;
boundMethodCheck(this, PostgreSQL);
opts = defaults(opts, {
table: required,
path: 'backup',
cb: required
});
dbg = this._dbg(`_backup_table(table='${opts.table}')`);
cmd = `mkdir -p ${opts.path}; time pg_dump -Fc --table ${opts.table} ${this._database} > ${opts.path}/${opts.table}.bak`;
dbg(cmd);
return misc_node.execute_code({
command: cmd,
timeout: 0,
home: '.',
env: {
PGPASSWORD: this._password,
PGUSER: 'smc',
PGHOST: this._host
},
err_on_exit: true,
cb: opts.cb
});
}
_backup_bup(opts) {
var cmd, dbg;
boundMethodCheck(this, PostgreSQL);
opts = defaults(opts, {
path: 'backup',
cb: required
});
dbg = this._dbg(`_backup_bup(path='${opts.path}')`);
// We use no compression because the backup files are already all highly compressed.
cmd = `mkdir -p '${opts.path}' && export && bup init && bup index '${opts.path}' && bup save --strip --compress=0 '${opts.path}' -n master`;
dbg(cmd);
return misc_node.execute_code({
command: cmd,
timeout: 0,
home: '.',
env: {
BUP_DIR: `${opts.path}/.bup`
},
err_on_exit: true,
cb: opts.cb
});
}
_get_backup_tables(tables) {
var all, i, len, non_critical, s, t, v, x;
boundMethodCheck(this, PostgreSQL);
if (misc.is_array(tables)) {
return tables;
}
all = (function() {
var results;
results = [];
for (t in SCHEMA) {
s = SCHEMA[t];
if (!s.virtual) {
results.push(t);
}
}
return results;
})();
if (tables === 'all') {
return all;
} else if (tables === 'critical') {
// TODO: critical for backup or not should probably be in the schema itself, not here.
v = [];
non_critical = ['stats', 'syncstrings', 'file_use', 'eval_outputs', 'blobs', 'eval_inputs', 'patches', 'cursors'];
for (i = 0, len = all.length; i < len; i++) {
x = all[i];
if (x.indexOf('log') === -1 && indexOf.call(non_critical, x) < 0) {
v.push(x);
}
}
return v;
} else {
return [tables];
}
}
restore_tables(opts) {
var backed_up_tables, dbg, filename, i, len, restore, table, tables;
boundMethodCheck(this, PostgreSQL);
opts = defaults(opts, {
tables: void 0, // same as for backup_tables, or undefined to use whatever we have in the path
path: '/backup/postgres',
limit: 5,
cb: required
});
backed_up_tables = (function() {
var i, len, ref, results;
ref = fs.readdirSync(opts.path);
results = [];
for (i = 0, len = ref.length; i < len; i++) {
filename = ref[i];
if (filename.slice(-4) === '.bak') {
results.push(filename.slice(0, -4));
}
}
return results;
})();
if (opts.tables == null) {
tables = backed_up_tables;
} else {
tables = this._get_backup_tables(opts.tables);
}
for (i = 0, len = tables.length; i < len; i++) {
table = tables[i];
if (indexOf.call(backed_up_tables, table) < 0) {
opts.cb(`there is no backup of '${table}'`);
return;
}
}
dbg = this._dbg("restore_tables()");
dbg(`restoring tables: ${misc.to_json(tables)}`);
restore = (table, cb) => {
dbg(`restore '${table}'`);
return this._restore_table({
table: table,
path: opts.path,
cb: cb
});
};
return async.mapLimit(tables, opts.limit, restore, (err) => {
return opts.cb(err);
});
}
_restore_table(opts) {
var dbg;
boundMethodCheck(this, PostgreSQL);
opts = defaults(opts, {
table: required,
path: 'backup',
cb: required
});
dbg = this._dbg(`_restore_table(table='${opts.table}')`);
return async.series([
(cb) => {
dbg("dropping existing table if it exists");
return this._query({
query: `DROP TABLE IF EXISTS ${opts.table}`,
cb: cb
});
},
(cb) => {
var cmd;
cmd = `time pg_restore -C -d ${this._database} ${opts.path}/${opts.table}.bak`;
dbg(cmd);
return misc_node.execute_code({
command: cmd,
timeout: 0,
home: '.',
env: {
PGPASSWORD: this._password,
PGUSER: this._user,
PGHOST: this._host
},
err_on_exit: true,
cb: cb
});
}
], (err) => {
return opts.cb(err);
});
}
};
};
}).call(this);
//# sourceMappingURL=postgres-ops.js.map