// @clickup/pg-mig
// PostgreSQL schema migration tool with microsharding and clustering support
// (compiled CommonJS output, JavaScript; 136 lines, 6.4 kB)
;
// Interop helper emitted by the TypeScript compiler: passes ES module
// namespaces through unchanged, and wraps plain CommonJS exports in a
// `{ default: ... }` shim so `.default` access works uniformly.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.actionUndoOrApply = actionUndoOrApply;
const path_1 = require("path");
const compact_1 = __importDefault(require("lodash/compact"));
const Dest_1 = require("../internal/Dest");
const Grid_1 = require("../internal/Grid");
const Patch_1 = require("../internal/Patch");
const ProgressPrinter_1 = require("../internal/ProgressPrinter");
const render_1 = require("../internal/render");
// Minimum terminal height (in rows) required to use the interactive TTY
// progress printer; shorter terminals fall back to the streaming printer.
const MIN_TTY_ROWS = 5;
/**
 * Applies or undoes migrations.
 *
 * Reads from `options`: action.type ("apply" | "undo"), action.version,
 * createDB, dry, force, parallelism, validShardSchemasSql.
 *
 * @param options - run configuration (see above).
 * @param hostDests - one destination (Dest) per PostgreSQL host.
 * @param registry - migration registry; this function uses getDigest(),
 *   beforeFile and afterFile.
 * @returns `{ success, hasMoreWork }` — hasMoreWork=true tells the caller
 *   that new migration chains appeared after a successful "apply" run, so
 *   another pass is needed.
 */
async function actionUndoOrApply(options, hostDests, registry) {
    var _a, _b, _c;
    const digest = registry.getDigest();
    // On "apply" with createDB: ensure the database exists on every host,
    // waiting (with a progress message) for hosts that are not up yet.
    if (options.action.type === "apply" && options.createDB) {
        for (const dest of hostDests) {
            await dest
                .createDB((e) => (0, render_1.printText)(`PostgreSQL host ${dest.getName()} is not yet up; waiting (${e})...`))
                .then((status) => status === "created" &&
                (0, render_1.printText)(`Database ${dest.getName()} did not exist; created.`));
        }
    }
    // "undo" requires an explicit version: show the current state and bail.
    if (options.action.type === "undo" && !options.action.version) {
        (0, render_1.printText)(await (0, render_1.renderLatestVersions)(hostDests, registry, options.validShardSchemasSql));
        (0, render_1.printError)("Please provide a migration version to undo.");
        return { success: false, hasMoreWork: false };
    }
    // Compute the per-host migration chains to run (down-chains when undoing).
    const patch = new Patch_1.Patch(hostDests, registry, {
        undo: options.action.type === "undo" ? options.action.version : undefined,
        validShardSchemasSql: options.validShardSchemasSql,
    });
    const chains = await patch.getChains();
    // If we are going to undo something, reset the digest in the DB before
    // running the down migrations, so if we fail partially, the digest in the DB
    // will be reset.
    if (options.action.type === "undo" && chains.length > 0 && !options.dry) {
        await Dest_1.Dest.saveDigests(hostDests, { reset: "before-undo" });
    }
    // File names of the optional before/after hook files; compact() drops
    // whichever of the two is absent.
    const beforeAfterFiles = (0, compact_1.default)([
        (_a = registry.beforeFile) === null || _a === void 0 ? void 0 : _a.fileName,
        (_b = registry.afterFile) === null || _b === void 0 ? void 0 : _b.fileName,
    ]);
    // Fast path: no chains, the stored before/after fingerprint still
    // matches, and the user did not pass force.
    if (chains.length === 0 &&
        (await Dest_1.Dest.checkRerunFingerprint(hostDests, beforeAfterFiles)) &&
        !options.force) {
        // If we have nothing to apply, save the digest in case it was not saved
        // previously, to keep the invariant.
        if (options.action.type === "apply" && !options.dry) {
            await Dest_1.Dest.saveDigests(hostDests, { digest });
        }
        (0, render_1.printText)(await (0, render_1.renderLatestVersions)(hostDests, registry, options.validShardSchemasSql));
        (0, render_1.printText)((0, render_1.renderPatchSummary)(chains, []));
        (0, render_1.printSuccess)("Nothing to do.");
        return { success: true, hasMoreWork: false };
    }
    // Dry-run: print what would be done and change nothing.
    if (options.dry) {
        (0, render_1.printText)(await (0, render_1.renderLatestVersions)(hostDests, registry, options.validShardSchemasSql));
        (0, render_1.printText)((0, render_1.renderPatchSummary)(chains, beforeAfterFiles));
        (0, render_1.printSuccess)("Dry-run mode.");
        return { success: true, hasMoreWork: false };
    }
    (0, render_1.printText)((0, render_1.renderPatchSummary)(chains, beforeAfterFiles));
    // Remember that if we crash below (e.g. in after.sql), we'll need to run
    // before.sql+after.sql on retry even if there are no new migration versions
    await Dest_1.Dest.saveRerunFingerprint(hostDests, beforeAfterFiles, "reset");
    // Build the execution grid: the main chains plus, per host, a "dn"
    // pre-chain for beforeFile and an "up" post-chain for afterFile.
    // Parallelism defaults to 10 when options.parallelism is null/undefined.
    const grid = new Grid_1.Grid(chains, (_c = options.parallelism) !== null && _c !== void 0 ? _c : 10, registry.beforeFile
        ? hostDests.map((dest) => ({
            type: "dn",
            dest,
            migrations: [
                {
                    version: (0, path_1.basename)(registry.beforeFile.fileName),
                    file: registry.beforeFile,
                    newVersions: null,
                },
            ],
        }))
        : [], registry.afterFile
        ? hostDests.map((dest) => ({
            type: "up",
            dest,
            migrations: [
                {
                    version: (0, path_1.basename)(registry.afterFile.fileName),
                    file: registry.afterFile,
                    newVersions: null,
                },
            ],
        }))
        : []);
    // Interactive TTY progress rendering only when stdout is a TTY with at
    // least MIN_TTY_ROWS rows; otherwise plain streaming output.
    const progress = process.stdout.isTTY &&
        process.stdout.rows &&
        process.stdout.rows >= MIN_TTY_ROWS
        ? new ProgressPrinter_1.ProgressPrinterTTY()
        : new ProgressPrinter_1.ProgressPrinterStream();
    // Run all chains, repainting the grid through the throttled printer.
    const success = await grid.run(progress.throttle(() => progress.print((0, render_1.renderGrid)(grid, progress.skipEmptyLines()).lines)));
    progress.clear();
    // Final render pass collects the complete error/warning lists.
    const { lines, errors, warnings } = (0, render_1.renderGrid)(grid, true);
    if (errors.length > 0) {
        (0, render_1.printError)("\n###\n### FAILED. See complete error list below.\n###\n");
        (0, render_1.printText)(lines.join("\n"));
        (0, render_1.printError)(`Failed with ${errors.length} error(s).`);
    }
    else if (warnings.length > 0) {
        (0, render_1.printText)("\n###\n### SUCCEEDED with warnings. See complete warning list below.\n###\n");
        (0, render_1.printText)(lines.join("\n"));
        (0, render_1.printSuccess)(`Succeeded with ${warnings.length} warning(s).`);
    }
    else {
        (0, render_1.printSuccess)("Succeeded.");
    }
    if (!success) {
        return { success: false, hasMoreWork: false };
    }
    // Everything ran cleanly: mark the before/after fingerprint up-to-date so
    // the fast path above can skip the hooks next time.
    await Dest_1.Dest.saveRerunFingerprint(hostDests, beforeAfterFiles, "up-to-date");
    if (options.action.type === "apply") {
        // Re-check: if new chains appeared while we were running, report
        // hasMoreWork so the caller can run another pass; only persist the
        // digest once nothing is left to apply.
        if ((await patch.getChains()).length > 0) {
            return { success: true, hasMoreWork: true };
        }
        else {
            await Dest_1.Dest.saveDigests(hostDests, { digest });
            return { success: true, hasMoreWork: false };
        }
    }
    else {
        // Undo finished: reset the stored digests.
        await Dest_1.Dest.saveDigests(hostDests, { reset: "after-undo" });
        return { success: true, hasMoreWork: false };
    }
}
//# sourceMappingURL=actionUndoOrApply.js.map