// @clickup/pg-mig — PostgreSQL schema migration tool with microsharding and
// clustering support. Compiled CLI entry point (cli.js, 341 lines, 14.2 kB).
"use strict";
// TypeScript ESM-interop helper: pass through real ES modules (marked with
// __esModule) untouched; wrap plain CommonJS exports as { default: mod }.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.main = main;
exports.migrate = migrate;
exports.loadDBDigest = loadDBDigest;
exports.cli = cli;
const path_1 = require("path");
const compact_1 = __importDefault(require("lodash/compact"));
const mapValues_1 = __importDefault(require("lodash/mapValues"));
const pickBy_1 = __importDefault(require("lodash/pickBy"));
const sortBy_1 = __importDefault(require("lodash/sortBy"));
const Dest_1 = require("./internal/Dest");
const Grid_1 = require("./internal/Grid");
const Args_1 = require("./internal/helpers/Args");
const makeMigration_1 = require("./internal/helpers/makeMigration");
const readConfigs_1 = require("./internal/helpers/readConfigs");
const Patch_1 = require("./internal/Patch");
const ProgressPrinter_1 = require("./internal/ProgressPrinter");
const Registry_1 = require("./internal/Registry");
const render_1 = require("./internal/render");
// Minimum terminal height (in rows) required to use the in-place TTY progress
// renderer; shorter terminals fall back to plain streamed output.
const MIN_TTY_ROWS = 5;
/**
 * CLI tool entry point. This function is run when `pg-mig` is called from the
 * command line. Accepts parameters from process.argv. See `migrate()` for
 * option names.
 *
 * If no options are passed, uses `PGHOST`, `PGPORT`, `PGUSER`, `PGPASSWORD`,
 * `PGDATABASE` environment variables which are standard for e.g. `psql`.
 *
 * You can pass multiple hosts separated by comma or semicolon.
 *
 * Examples:
 * ```
 * pg-mig --make=my-migration-name@sh
 * pg-mig --make=other-migration-name@sh0000
 * pg-mig --undo=20191107201239.my-migration-name.sh
 * pg-mig --list
 * pg-mig --list=digest
 * pg-mig
 * ```
 */
async function main(argsIn) {
    const args = new Args_1.Args(argsIn, [
        // We use --migdir and not --dir, because @mapbox/node-pre-gyp used by
        // bcrypt conflicts with --dir option.
        "migdir",
        "hosts",
        "port",
        "user",
        "pass",
        "db",
        "undo",
        "make",
        "list",
        "parallelism",
    ], ["dry", "createdb", "force", "skip-config"]);
    // Derive the requested action from the mutually exclusive CLI options;
    // plain `pg-mig` with no action option means "apply all migrations".
    const action = args.getOptional("make") !== undefined
        ? { type: "make", name: args.get("make") }
        : args.getOptional("list") === ""
            ? { type: "list" }
            : args.getOptional("list") === "digest"
                ? { type: "digest" }
                : args.getOptional("undo") !== undefined
                    ? { type: "undo", version: args.get("undo") }
                    : { type: "apply", after: [] };
    if (!args.flag("skip-config")) {
        // Each pg-mig.config file may inject environment variables (scalar
        // values only, stringified) and, for "apply", register an `after` hook.
        for (const config of await (0, readConfigs_1.readConfigs)("pg-mig.config", action.type)) {
            Object.assign(process.env, (0, mapValues_1.default)((0, pickBy_1.default)(config, (v) => typeof v === "string" ||
                typeof v === "number" ||
                typeof v === "boolean"), String));
            if (action.type === "apply") {
                if ("after" in config && typeof config.after === "function") {
                    action.after.push(config.after);
                }
            }
        }
    }
    return migrate({
        migDir: args.get("migdir", process.env["PGMIGDIR"]),
        hosts: (0, compact_1.default)(args
            .get("hosts", process.env["PGHOST"] || "127.0.0.1")
            .split(/[\s,;]+/)
            .map((host) => host.trim())),
        // Always pass an explicit radix to parseInt(); NaN or 0 intentionally
        // falls back to undefined (i.e. the downstream default).
        port: parseInt(args.get("port", process.env["PGPORT"] || ""), 10) || undefined,
        user: args.get("user", process.env["PGUSER"] || "") || undefined,
        pass: args.get("pass", process.env["PGPASSWORD"] || "") || undefined,
        db: args.get("db", process.env["PGDATABASE"] || "") || undefined,
        // PGCREATEDB enables DB auto-creation unless set to an explicit
        // "falsy-looking" string.
        createDB: args.flag("createdb") ||
            ![undefined, null, "", "0", "false", "undefined", "null", "no"].includes(process.env["PGCREATEDB"]),
        parallelism: parseInt(args.get("parallelism", "0"), 10) || undefined,
        dry: args.flag("dry"),
        force: args.flag("force"),
        action,
    });
}
/**
 * Similar to main(), but accepts options explicitly, not from process.argv.
 * This function is meant to be called from other tools.
 *
 * Returns true on success, false on failure. Throws if no hosts are given.
 */
async function migrate(options) {
    var _a;
    const registry = new Registry_1.Registry(options.migDir);
    // The "digest" action needs no DB connections: it only reads the
    // migration registry on disk, so handle it before any host parsing.
    if (options.action.type === "digest") {
        return actionDigest(options, registry);
    }
    if (options.hosts.length === 0) {
        throw "No hosts provided.";
    }
    const hostDests = options.hosts.map((host) => Dest_1.Dest.create(host, options));
    // Available in *.sql migration version files.
    process.env["PG_MIG_HOSTS"] = hostDests
        .map((dest) => dest.hostSpec())
        .join(",");
    // A port/db is "significant" when it differs between hosts; insignificant
    // values are printed once in the summary line below instead of per host.
    const portIsSignificant = hostDests.some((dest) => dest.port !== hostDests[0].port);
    const dbIsSignificant = hostDests.some((dest) => dest.db !== hostDests[0].db);
    for (const dest of hostDests) {
        dest.setSignificance({ portIsSignificant, dbIsSignificant });
    }
    (0, render_1.printText)((0, compact_1.default)([
        "Running on " + hostDests.map((dest) => dest.name()).join(","),
        !portIsSignificant && `port ${hostDests[0].port}`,
        !dbIsSignificant && `db ${hostDests[0].db}`,
    ]).join(", "));
    if (options.action.type === "make") {
        return actionMake(options, registry, options.action.name);
    }
    if (options.action.type === "list") {
        return actionList(options, registry);
    }
    // "apply"/"undo": keep running passes until a pass fails or reports that
    // there is no more work left.
    while (true) {
        const { success, hasMoreWork } = await actionUndoOrApply(options, hostDests, registry);
        // Run the configured "after" hooks only once everything was applied
        // successfully and nothing remains (never in dry-run mode).
        if (!options.dry &&
            options.action.type === "apply" &&
            success &&
            !hasMoreWork) {
            for (const after of (_a = options.action.after) !== null && _a !== void 0 ? _a : []) {
                await after();
            }
        }
        if (!success || !hasMoreWork) {
            return success;
        }
    }
}
/**
 * Loads the digest strings stored in the provided databases and picks the one
 * that best reflects the current database schema status.
 */
async function loadDBDigest(dests, sqlRunner) {
    return Registry_1.Registry.chooseBestDigest(await Dest_1.Dest.loadDigests(dests, sqlRunner));
}
/**
 * Makes new migration files. The name argument has the form
 * "migration_name@schema_prefix"; prints usage hints and returns false when
 * either part is missing or malformed.
 */
async function actionMake(options, registry, name) {
    const usage = "Format: --make=migration_name@schema_prefix";
    const [migrationName, schemaPrefix] = name.split("@");
    // Guard: the migration name must be present and use the allowed charset.
    if (!migrationName?.match(/^[-a-z0-9_]+$/)) {
        (0, render_1.printError)("migration_name is missing or includes incorrect characters");
        (0, render_1.printText)(usage);
        return false;
    }
    // Guard: the schema prefix part is mandatory too.
    if (!schemaPrefix) {
        (0, render_1.printError)("schema_prefix is missing");
        (0, render_1.printText)(usage);
        return false;
    }
    // Unknown prefixes are allowed, but warn and list the known ones.
    if (!registry.prefixes.includes(schemaPrefix)) {
        (0, render_1.printText)(`WARNING: schema prefix "${schemaPrefix}" wasn't found. Valid prefixes:`);
        registry.prefixes.forEach((prefix) => (0, render_1.printText)(`- ${prefix}`));
    }
    (0, render_1.printText)("Making migration files...");
    const createdFiles = await (0, makeMigration_1.makeMigration)(options.migDir, migrationName, schemaPrefix);
    createdFiles.forEach((file) => (0, render_1.printText)(file));
    return true;
}
/**
 * Prints every migration version known to the registry, sorted ascending.
 */
async function actionList(_options, registry) {
    (0, render_1.printText)("All versions:");
    const versions = (0, sortBy_1.default)(registry.getVersions());
    for (const version of versions) {
        (0, render_1.printText)(` > ${version}`);
    }
    return true;
}
/**
 * Prints the "code digest" of all migration version names on disk. Digests
 * are strings that compare lexicographically: when the DB's digest is greater
 * than or equal to the code's digest, the two are compatible and the code can
 * be deployed.
 */
async function actionDigest(_options, registry) {
    const digest = registry.getDigest();
    (0, render_1.printText)(digest);
    return true;
}
/**
 * Applies or undoes migrations.
 *
 * Returns { success, hasMoreWork }; hasMoreWork=true signals the caller
 * (migrate()'s loop) that more applicable chains appeared after this pass.
 */
async function actionUndoOrApply(options, hostDests, registry) {
    var _a, _b, _c;
    const digest = registry.getDigest();
    // With --createdb (or PGCREATEDB), wait for each host to come up and
    // create its database if it does not exist yet.
    if (options.action.type === "apply" && options.createDB) {
        for (const dest of hostDests) {
            await dest
                .createDB((e) => (0, render_1.printText)(`PostgreSQL host ${dest.name()} is not yet up; waiting (${e})...`))
                .then((status) => status === "created" &&
                (0, render_1.printText)(`Database ${dest.name()} did not exist; created.`));
        }
    }
    // Undo requires an explicit version; show the current per-host versions
    // to help the user choose one.
    if (options.action.type === "undo" && !options.action.version) {
        (0, render_1.printText)(await (0, render_1.renderLatestVersions)(hostDests, registry));
        (0, render_1.printError)("Please provide a migration version to undo.");
        return { success: false, hasMoreWork: false };
    }
    const patch = new Patch_1.Patch(hostDests, registry, {
        undo: options.action.type === "undo" ? options.action.version : undefined,
    });
    const chains = await patch.getChains();
    // If we are going to undo something, reset the digest in the DB before
    // running the down migrations, so if we fail partially, the digest in the DB
    // will be reset.
    if (options.action.type === "undo" && chains.length > 0 && !options.dry) {
        await Dest_1.Dest.saveDigests(hostDests, { reset: "before-undo" });
    }
    // Optional before/after hook files registered in the migration directory.
    const beforeAfterFiles = (0, compact_1.default)([
        (_a = registry.beforeFile) === null || _a === void 0 ? void 0 : _a.fileName,
        (_b = registry.afterFile) === null || _b === void 0 ? void 0 : _b.fileName,
    ]);
    // Fast path: nothing to run, hook files unchanged, and no --force.
    if (chains.length === 0 &&
        (await Dest_1.Dest.checkRerunFingerprint(hostDests, beforeAfterFiles)) &&
        !options.force) {
        // If we have nothing to apply, save the digest in case it was not saved
        // previously, to keep the invariant.
        if (options.action.type === "apply" && !options.dry) {
            await Dest_1.Dest.saveDigests(hostDests, { digest });
        }
        (0, render_1.printText)(await (0, render_1.renderLatestVersions)(hostDests, registry));
        (0, render_1.printText)((0, render_1.renderPatchSummary)(chains, []));
        (0, render_1.printSuccess)("Nothing to do.");
        return { success: true, hasMoreWork: false };
    }
    // Dry-run: print what would be done and change nothing.
    if (options.dry) {
        (0, render_1.printText)(await (0, render_1.renderLatestVersions)(hostDests, registry));
        (0, render_1.printText)((0, render_1.renderPatchSummary)(chains, beforeAfterFiles));
        (0, render_1.printSuccess)("Dry-run mode.");
        return { success: true, hasMoreWork: false };
    }
    (0, render_1.printText)((0, render_1.renderPatchSummary)(chains, beforeAfterFiles));
    // Remember that if we crash below (e.g. in after.sql), we'll need to run
    // before.sql+after.sql on retry even if there are no new migration versions
    await Dest_1.Dest.saveRerunFingerprint(hostDests, beforeAfterFiles, "reset");
    // Build the execution grid: migration chains plus per-host entries for
    // before.sql (type "dn") and after.sql (type "up") — presumably run
    // before/after the chains by Grid (defined elsewhere; confirm there).
    // Parallelism defaults to 10 when not provided.
    const grid = new Grid_1.Grid(chains, (_c = options.parallelism) !== null && _c !== void 0 ? _c : 10, registry.beforeFile
        ? hostDests.map((dest) => ({
            type: "dn",
            dest,
            migrations: [
                {
                    version: (0, path_1.basename)(registry.beforeFile.fileName),
                    file: registry.beforeFile,
                    newVersions: null,
                },
            ],
        }))
        : [], registry.afterFile
        ? hostDests.map((dest) => ({
            type: "up",
            dest,
            migrations: [
                {
                    version: (0, path_1.basename)(registry.afterFile.fileName),
                    file: registry.afterFile,
                    newVersions: null,
                },
            ],
        }))
        : []);
    // Use the in-place TTY progress renderer only when attached to a terminal
    // tall enough (MIN_TTY_ROWS); otherwise fall back to streamed output.
    const progress = process.stdout.isTTY &&
        process.stdout.rows &&
        process.stdout.rows >= MIN_TTY_ROWS
        ? new ProgressPrinter_1.ProgressPrinterTTY()
        : new ProgressPrinter_1.ProgressPrinterStream();
    const success = await grid.run(progress.throttle(() => progress.print((0, render_1.renderGrid)(grid, progress.skipEmptyLines()).lines)));
    progress.clear();
    // Render the final grid once more to collect complete error/warning lists.
    const { lines, errors, warnings } = (0, render_1.renderGrid)(grid, true);
    if (errors.length > 0) {
        (0, render_1.printError)("\n###\n### FAILED. See complete error list below.\n###\n");
        (0, render_1.printText)(lines.join("\n"));
        (0, render_1.printError)(`Failed with ${errors.length} error(s).`);
    }
    else if (warnings.length > 0) {
        (0, render_1.printText)("\n###\n### SUCCEEDED with warnings. See complete warning list below.\n###\n");
        (0, render_1.printText)(lines.join("\n"));
        (0, render_1.printSuccess)(`Succeeded with ${warnings.length} warning(s).`);
    }
    else {
        (0, render_1.printSuccess)("Succeeded.");
    }
    if (!success) {
        return { success: false, hasMoreWork: false };
    }
    await Dest_1.Dest.saveRerunFingerprint(hostDests, beforeAfterFiles, "up-to-date");
    if (options.action.type === "apply") {
        // If new chains became applicable after this pass, ask the caller to
        // run another pass; only save the digest once everything is applied.
        if ((await patch.getChains()).length > 0) {
            return { success: true, hasMoreWork: true };
        }
        else {
            await Dest_1.Dest.saveDigests(hostDests, { digest });
            return { success: true, hasMoreWork: false };
        }
    }
    else {
        // Undo finished: reset the digest, since the DB schema no longer
        // matches the code's digest.
        await Dest_1.Dest.saveDigests(hostDests, { reset: "after-undo" });
        return { success: true, hasMoreWork: false };
    }
}
/**
 * A wrapper around main() to call it from a bin script. Exits the process
 * with code 0 on success and 1 on failure or thrown error (the error is
 * printed first).
 */
function cli() {
    void (async () => {
        try {
            const success = await main(process.argv.slice(2));
            process.exit(success ? 0 : 1);
        }
        catch (e) {
            (0, render_1.printError)(e);
            process.exit(1);
        }
    })();
}
// Run the CLI only when this file is executed directly (not require()d).
if (require.main === module) {
    cli();
}
//# sourceMappingURL=cli.js.map