/**
 * @clickup/pg-mig — PostgreSQL schema migration tool with microsharding
 * and clustering support.
 * Version: (unspecified in this extract)
 * Compiled JavaScript output • 159 lines • 6.9 kB
 */
;
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Registry = void 0;
const crypto_1 = __importDefault(require("crypto"));
const fs_1 = require("fs");
const path_1 = require("path");
const partition_1 = __importDefault(require("lodash/partition"));
const sortBy_1 = __importDefault(require("lodash/sortBy"));
const DefaultMap_1 = require("./helpers/DefaultMap");
const extractVars_1 = require("./helpers/extractVars");
const schemaNameMatchesPrefix_1 = require("./helpers/schemaNameMatchesPrefix");
const wrapNonTransactional_1 = require("./helpers/wrapNonTransactional");
// Must be lexicographically less than "0".
const DIGEST_SEP = ".";
/**
 * A directory with migration entries.
 * For every entry, an "up" and a "dn" files are required.
 */
class Registry {
    /**
     * Synchronously scans `dir` and indexes every "*.sql" file found there:
     * - "before.sql" / "after.sql" become the special pre/post hook files;
     * - "NNNNNN.Title.SchemaPrefix.up.sql" (together with its matching
     *   ".dn.sql" counterpart) becomes a migration entry grouped by
     *   SchemaPrefix.
     * Throws (a plain string) when a "*.sql" file doesn't follow the naming
     * scheme, or (via buildFile) when the paired "dn" file is missing.
     */
    constructor(dir) {
        this.dir = dir;
        this.entriesByPrefix = new DefaultMap_1.DefaultMap();
        this.versions = new Set();
        this.beforeFile = null;
        this.afterFile = null;
        // Sort for deterministic processing order regardless of FS order.
        const files = (0, fs_1.readdirSync)(dir)
            .sort()
            .filter((file) => (0, fs_1.lstatSync)(dir + "/" + file).isFile());
        for (const file of files) {
            if (!file.endsWith(".sql")) {
                continue;
            }
            if (file === "before.sql") {
                this.beforeFile = buildFile(dir + "/" + file);
                continue;
            }
            if (file === "after.sql") {
                this.afterFile = buildFile(dir + "/" + file);
                continue;
            }
            const matches = file.match(/^((\d+\.[^.]+)\.([^.]+))\.(up|dn)\.sql$/);
            if (!matches) {
                // NOTE(review): this project throws plain strings (not Error
                // instances) throughout; kept as-is so callers that inspect
                // the thrown value keep working.
                throw ("Migration file must have format " +
                    "NNNNNN.Title.SchemaPrefix.{up,dn}.sql, but found " +
                    file);
            }
            if (matches[4] === "dn") {
                // The "dn" file is loaded below, together with its "up" sibling.
                continue;
            }
            const entry = {
                up: buildFile(dir + "/" + file),
                dn: buildFile(dir + "/" + file.replace(/\.up\.(\w+)$/, ".dn.$1")),
                name: matches[1],
                schemaPrefix: matches[3],
            };
            this.entriesByPrefix.getOrAdd(entry.schemaPrefix, []).push(entry);
            this.versions.add(entry.name);
        }
        // Sort entries from longest schema prefix to shortest schema prefix.
        // This is needed later for duplicates removal (e.g. if some schema
        // name matches "sh0000" pattern, it shouldn't match "sh" pattern later).
        this.entriesByPrefix = new DefaultMap_1.DefaultMap((0, sortBy_1.default)(Array.from(this.entriesByPrefix), ([prefix]) => -prefix.length));
    }
    /**
     * Given digest strings collected from multiple databases, picks the one
     * the migration process should trust. Real digests (containing
     * DIGEST_SEP) win over reset/undo markers; among reals, the
     * lexicographically highest is chosen.
     */
    static chooseBestDigest(values) {
        const [digests, resets] = (0, partition_1.default)(values, (digest) => digest.includes(DIGEST_SEP));
        // If we have at least one real digest, then use the highest one. It means
        // that the database is at least at that migration, since we save all
        // digests after successful migration process, when it succeeds everywhere.
        if (digests.length > 0) {
            return (0, sortBy_1.default)(digests).reverse()[0];
        }
        // If no real digests are provided, then someone initiated an undo. We
        // proceed with DB migration process only when we're sure that we saved undo
        // signal to ALL databases, so we can be sure that, even if undo fails, ALL
        // DBs will have that reset digest saved in.
        if (resets.length > 0) {
            // "0" + DIGEST_SEP sorts lexicographically below any real digest
            // (DIGEST_SEP is "." which is less than "0").
            return "0" + DIGEST_SEP + (0, sortBy_1.default)(resets)[0];
        }
        // No digests at all passed to the function.
        return "0";
    }
    /** Returns all distinct schema prefixes, longest first (see constructor). */
    getPrefixes() {
        return [...this.entriesByPrefix.keys()];
    }
    /**
     * Maps each schema name to the list of migration entries whose prefix
     * matches it. A schema matched by both a longer and a shorter prefix
     * sticks with the longer one; two prefixes where neither is an extension
     * of the other matching the same schema is an error (string thrown).
     */
    groupBySchema(schemas) {
        const entriesBySchema = new Map();
        for (const schema of schemas) {
            for (const [schemaPrefix, list] of this.entriesByPrefix.entries()) {
                if (!(0, schemaNameMatchesPrefix_1.schemaNameMatchesPrefix)(schema, schemaPrefix)) {
                    continue;
                }
                if (entriesBySchema.has(schema)) {
                    const prevPrefix = entriesBySchema.get(schema)[0].schemaPrefix;
                    if (prevPrefix.startsWith(schemaPrefix)) {
                        // We've already matched this schema to a migration with some
                        // longer prefix; e.g. if we have both migrations for "sh0000"
                        // and "sh" prefixes, then the schema "sh0000" will match to
                        // only the 1st one, and the 2nd one will be skipped.
                        continue;
                    }
                    // NOTE(review): plain-string throw kept for caller compatibility.
                    throw (`Schema ${schema} matches more than one migration prefix ` +
                        `(${prevPrefix} and ${schemaPrefix})`);
                }
                entriesBySchema.set(schema, list);
            }
        }
        return entriesBySchema;
    }
    /** Returns all migration version names, lexicographically sorted. */
    getVersions() {
        return (0, sortBy_1.default)([...this.versions]);
    }
    /** Tells whether the registry contains the given version name. */
    hasVersion(version) {
        return this.versions.has(version);
    }
    /**
     * Extracts the "NNNNNN.Title.SchemaPrefix" part from a (possibly longer)
     * name; returns the name unchanged when it doesn't match that shape.
     */
    extractVersion(name) {
        const matches = name.match(/^\d+\.[^.]+\.[^.]+/);
        return matches ? matches[0] : name;
    }
    /**
     * Builds a digest "ORDER.HASH" identifying the exact set of versions in
     * this registry: ORDER is the numeric prefix of the highest version (or
     * "0" when the registry is empty), HASH is sha256 over the sorted version
     * list ("short" truncates it to 16 hex chars).
     */
    getDigest(type) {
        const versions = this.getVersions();
        const last = versions.length > 0 ? versions[versions.length - 1] : undefined;
        // FIX: read the order from the returned match array instead of the
        // deprecated RegExp.$1 static property, which is global mutable state
        // and can be clobbered by any intervening regex operation.
        const orderMatch = last ? last.match(/^(\d+)/) : null;
        const lastOrder = orderMatch
            ? orderMatch[1]
            : versions.length > 0
                ? last
                : "0";
        const hash = crypto_1.default
            .createHash("sha256")
            .update(versions.join("\n"))
            .digest("hex");
        return (lastOrder + DIGEST_SEP + (type === "short" ? hash.slice(0, 16) : hash));
    }
}
// Public CommonJS export: the Registry class declared above.
exports.Registry = Registry;
/**
 * Loads one migration SQL file from disk, extracts its control variables
 * (e.g. $parallelism_global, $delay, $run_alone), validates them, and
 * returns the parsed file descriptor.
 * Throws (a plain string, matching the project's convention) when the file
 * is missing or the non-transactional validation reports errors.
 */
function buildFile(fileName) {
    if (!(0, fs_1.existsSync)(fileName)) {
        throw `Migration file doesn't exist: ${fileName}`;
    }
    const sql = (0, fs_1.readFileSync)(fileName).toString();
    const vars = (0, extractVars_1.extractVars)(fileName, sql);
    // Validate before building the descriptor; the checks only need `vars`.
    const { errors } = (0, wrapNonTransactional_1.wrapNonTransactional)(fileName, vars);
    if (errors.length > 0) {
        throw (`File ${(0, path_1.basename)(fileName)} must satisfy the following:\n` +
            errors.map((error) => ` - ${error}`).join("\n"));
    }
    // Absent (or zero/falsy) parallelism vars mean "unlimited"; absent
    // $delay means no delay. (Original used `||`, preserved here.)
    return {
        fileName,
        parallelismGlobal: vars.$parallelism_global || Number.POSITIVE_INFINITY,
        parallelismPerHost: vars.$parallelism_per_host || Number.POSITIVE_INFINITY,
        delay: vars.$delay || 0,
        runAlone: Boolean(vars.$run_alone),
        vars,
    };
}
//# sourceMappingURL=Registry.js.map