@clickup/ent-framework
A PostgreSQL graph-database-alike library with microsharding and row-level security
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.PrimitiveMixin = PrimitiveMixin;
const fast_typescript_memoize_1 = require("fast-typescript-memoize");
const compact_1 = __importDefault(require("lodash/compact"));
const first_1 = __importDefault(require("lodash/first"));
const flatten_1 = __importDefault(require("lodash/flatten"));
const sum_1 = __importDefault(require("lodash/sum"));
const ClientError_1 = require("../../abstract/ClientError");
const misc_1 = require("../../internal/misc");
const types_1 = require("../../types");
const EntNotInsertableError_1 = require("../errors/EntNotInsertableError");
const Predicate_1 = require("../predicates/Predicate");
/**
* Modifies the passed class, adding support for the minimal set of basic Ent
* operations. Internally, it uses Schema abstractions to run them.
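*
* A minimal usage sketch (EntTestBase and the "name" field are hypothetical,
* just to illustrate the static API this mixin adds; in the library, the
* mixin is normally composed into Ent classes together with other mixins
* rather than applied directly):
*
*   const EntTest = PrimitiveMixin(EntTestBase);
*   const id = await EntTest.insertIfNotExists(vc, { name: "test" });
*   const ent = id ? await EntTest.loadNullable(vc, id) : null;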
*/
function PrimitiveMixin(Base) {
class PrimitiveMixin extends Base {
constructor() {
super();
throw Error("Don't create Ents manually, use static loaders");
}
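/**
 * Inserts a new Ent into the Shard derived from the input fields. Returns
 * the ID of the inserted row, or null if a row with the same unique key
 * already exists. Runs privacy checks (unless the VC is omni), fires insert
 * triggers, and maintains Inverses.
 */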
static async insertIfNotExists(vc, input) {
const [shard] = await (0, misc_1.join)([
this.SHARD_LOCATOR.singleShardForInsert(input, "insert"),
vc.heartbeater.heartbeat(),
]);
if (!vc.isOmni()) {
await this.VALIDATION.validateInsert(vc, input);
}
if (this.TRIGGERS.hasInsertTriggers() || this.INVERSES.length > 0) {
// We have some triggers or inverses; that means we must generate an ID
// separately to let the before-triggers see it before the actual db
// operation happens.
const [id2, id2IsNewlyGenerated] = (0, misc_1.hasKey)(types_1.ID, input)
? [input[types_1.ID], false]
: [
await shard.run(this.SCHEMA.idGen(), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness),
true,
];
vc.cache(Predicate_1.IDsCacheUpdatable).add(id2); // to enable privacy checks in beforeInsert triggers
// Inverses which we're going to create.
const inverseRows = this.INVERSES.map((inverse) => ({
inverse,
id1: input[inverse.id2Field],
id2,
canUndoInverseInsert: id2IsNewlyGenerated,
}));
let isInserted = false;
let allowUndoInverses = true;
let lastError = undefined;
try {
// Preliminarily insert inverse rows into the inverses table, even before
// we insert the main Ent. This avoids race conditions when multiple
// Clients insert and load the main Ent simultaneously: in terms of
// business logic, having some "extra" inverses in the database is not a
// big deal, since they're only "hints" and are used to resolve Shard
// CANDIDATES.
await (0, misc_1.mapJoin)(inverseRows, async (inverseRow) => {
const { inverse, id1, id2 } = inverseRow;
if (!(await inverse.beforeInsert(vc, id1, id2))) {
// We must not even try to undo an Inverse creation when we know that we
// did not create it. (Some Inverses may already exist beforehand, e.g.
// when the Ent ID was explicitly passed during its insertion.)
inverseRow.canUndoInverseInsert = false;
}
});
// Insert the actual Ent. On DB error, we'll get an exception, and on
// a duplicate key violation (which is a business logic condition),
// we'll get a null returned.
return await this.TRIGGERS.wrapInsert(async (input) => {
const id = await shard.run(this.SCHEMA.insert(input), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness, (error, _attempt) => {
// Figure out whether we know for sure if the server applied the insert
// or not. Some examples where we don't: a "connection reset" error or a
// DB proxy timeout. In those cases, it's quite possible that the insert
// actually DID succeed internally even though we received an error, so
// we must NOT delete the inverses as a cleanup action.
if (!(error instanceof ClientError_1.ClientError) ||
error.kind === "unknown-server-state") {
allowUndoInverses = false;
}
});
// Remember the isInserted flag based on the ID received from the direct
// INSERT operation, before the after-triggers kick in. This allows making
// decisions about inverse-undo in case some after-triggers fail.
isInserted = !!id;
return id;
}, vc, { ...input, [types_1.ID]: id2 });
}
catch (e) {
lastError = e;
throw e;
}
finally {
// There are 3 failure conditions here:
// 1. There was an exception, but we don't know the state of the PG server
//    (it might or might not have applied the insert).
// 2. There was an exception during the INSERT, and we received the
//    response from PG (in this case, isInserted will remain false due to
//    the above initialization).
// 3. The INSERT resulted in a no-op due to a unique constraint violation
//    (in this case, insert() will return null, and we will write false to
//    isInserted).
if (!isInserted && allowUndoInverses) {
// We couldn't insert the Ent due to a unique key violation or some other
// DB error for which we know the exact PG server state. Try to undo the
// inverses creation (if we fail to undo, it's not a big deal to have
// stale inverses in the DB since they are only "hints" and only affect
// Shard candidate location). This logic looks scary, but in real life,
// there will likely be an "inverses fixer" service that removes orphaned
// inverses asynchronously.
await (0, misc_1.mapJoin)(inverseRows, async ({ inverse, id1, id2, canUndoInverseInsert }) => {
if (canUndoInverseInsert) {
this.CLUSTER.options.loggers.swallowedErrorLogger({
where: `(not an error, just a debug warning) PrimitiveMixin.insertIfNotExists(${this.name}), ` +
`undoing Inverse ${inverse.type} ${id1}->${id2}`,
error: lastError ?? Error("duplicate key on insert"),
elapsed: null,
importance: "low",
});
await inverse.afterDelete(vc, id1, id2).catch(() => { });
}
});
}
}
}
else {
// No insert triggers and no inverses: do just a plain insert.
return shard.run(this.SCHEMA.insert(input), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness);
}
}
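/**
 * Inserts a new row, or updates the existing row sharing the same unique
 * key. Throws EntNotInsertableError if the Ent class has insert/update
 * triggers or Inverses defined (upsert can't know whether the row was
 * inserted or updated, nor the old field values). Returns the ID of the
 * affected row.
 */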
static async upsert(vc, input) {
const [shard] = await (0, misc_1.join)([
this.SHARD_LOCATOR.singleShardForInsert(input, "upsert"),
vc.heartbeater.heartbeat(),
]);
if (this.TRIGGERS.hasInsertTriggers() ||
this.TRIGGERS.hasUpdateTriggers()) {
throw new EntNotInsertableError_1.EntNotInsertableError(this.name, vc.toString(), input, "upsert cannot work with triggers defined since it doesn't know whether the row was inserted or updated in the database");
}
if (this.INVERSES.length > 0) {
throw new EntNotInsertableError_1.EntNotInsertableError(this.name, vc.toString(), input, "upsert cannot work with inverses since it doesn't know the old values of fields in the database");
}
if (!vc.isOmni()) {
await this.VALIDATION.validateInsert(vc, input);
}
const query = this.SCHEMA.upsert(input);
const id = await shard.run(query, vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness);
vc.cache(Predicate_1.IDsCacheUpdatable).add(id);
return id;
}
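/**
 * Loads an Ent by its ID. Returns null if the ID doesn't map to any Shard
 * or the row doesn't exist.
 */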
static async loadNullable(vc, id) {
const [shard] = await (0, misc_1.join)([
this.SHARD_LOCATOR.singleShardFromID(types_1.ID, id, "loadNullable"),
vc.heartbeater.heartbeat(),
]);
if (!shard) {
return null;
}
const query = this.SCHEMA.load(id);
const row = await shard.run(query, vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness);
return row ? this.createEnt(vc, row) : null;
}
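/**
 * Loads an Ent by the values of its unique key fields. Queries all
 * candidate Shards and returns the first match found, or null if there is
 * none.
 */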
static async loadByNullable(vc, input) {
const [shards] = await (0, misc_1.join)([
this.SHARD_LOCATOR.multiShardsFromInput(vc, input, "loadBy"),
vc.heartbeater.heartbeat(),
]);
const rows = (0, compact_1.default)(await (0, misc_1.mapJoin)(shards, async (shard) => shard.run(this.SCHEMA.loadBy(input), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness)));
const row = (0, first_1.default)(rows);
return row ? this.createEnt(vc, row) : null;
}
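/**
 * Loads all Ents matching the provided field values, querying all candidate
 * Shards and wrapping each returned row into an Ent.
 */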
static async selectBy(vc, input) {
const [shards] = await (0, misc_1.join)([
this.SHARD_LOCATOR.multiShardsFromInput(vc, input, "selectBy"),
vc.heartbeater.heartbeat(),
]);
const ents = await (0, misc_1.mapJoin)(shards, async (shard) => {
const rows = await shard.run(this.SCHEMA.selectBy(input), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness);
return (0, misc_1.mapJoin)(rows, async (row) => this.createEnt(vc, row));
});
return (0, flatten_1.default)(ents);
}
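/**
 * Loads all Ents matching an arbitrary where condition, with a limit,
 * optional ordering and custom query parts, across all candidate Shards.
 */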
static async select(vc, where, limit, order, custom) {
const [shards] = await (0, misc_1.join)([
this.SHARD_LOCATOR.multiShardsFromInput(vc, where, "select"),
vc.heartbeater.heartbeat(),
]);
const ents = await (0, misc_1.mapJoin)(shards, async (shard) => {
const rows = await shard.run(this.SCHEMA.select({ where, limit, order, custom }), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness);
return (0, misc_1.mapJoin)(rows, async (row) => this.createEnt(vc, row));
});
return (0, flatten_1.default)(ents);
}
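/**
 * Same as select(), but yields Ents in chunks of at most chunkSize rows.
 * Iterates Shards one by one, paginating within each Shard via an
 * ascending-ID cursor, and stops once the total limit is reached.
 */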
static async *selectChunked(vc, where, chunkSize, limit, custom) {
const [shards] = await (0, misc_1.join)([
this.SHARD_LOCATOR.multiShardsFromInput(vc, where, "selectChunked"),
vc.heartbeater.heartbeat(),
]);
let lastSeenID = "0";
while (true) {
if (limit <= 0 || shards.length === 0) {
return;
}
if (limit < chunkSize) {
chunkSize = limit;
}
const cursoredWhere = {
...where,
$and: [{ [types_1.ID]: { $gt: lastSeenID } }, ...(where.$and ?? [])],
};
await vc.heartbeater.heartbeat();
const shard = shards[0];
const rows = await shard.run(this.SCHEMA.select({
where: cursoredWhere,
limit: chunkSize,
order: [{ [types_1.ID]: "ASC" }], // IMPORTANT for idCursor
custom,
}), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness);
if (rows.length > 0) {
const chunk = await (0, misc_1.mapJoin)(rows, async (row) => this.createEnt(vc, row));
yield chunk;
lastSeenID = chunk[chunk.length - 1][types_1.ID];
limit -= chunk.length;
}
if (rows.length === 0 || rows.length < chunkSize) {
shards.shift();
lastSeenID = "0";
}
}
}
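/**
 * Returns the number of rows matching the where condition, summed across
 * all candidate Shards.
 */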
static async count(vc, where) {
const [shards] = await (0, misc_1.join)([
this.SHARD_LOCATOR.multiShardsFromInput(vc, where, "count"),
vc.heartbeater.heartbeat(),
]);
const counts = await (0, misc_1.mapJoin)(shards, async (shard) => shard.run(this.SCHEMA.count(where), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness));
return (0, sum_1.default)(counts);
}
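/**
 * Returns true if at least one row matching the where condition exists in
 * any of the candidate Shards.
 */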
static async exists(vc, where) {
const [shards] = await (0, misc_1.join)([
this.SHARD_LOCATOR.multiShardsFromInput(vc, where, "exists"),
vc.heartbeater.heartbeat(),
]);
const exists = await (0, misc_1.mapJoin)(shards, async (shard) => shard.run(this.SCHEMA.exists(where), vc.toAnnotation(), vc.timeline(shard, this.SCHEMA.name), vc.freshness));
return exists.some((v) => v);
}
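/**
 * Updates the Ent's row in the DB; the Ent object itself is not mutated.
 * Runs privacy checks (unless the VC is omni) and update triggers, and
 * refreshes Inverses whose id2Field values are changed by the update. When
 * $cas is "skip-if-someone-else-changed-updating-ent-props" or a list of
 * field names, the expected $cas values are built from this Ent's current
 * field values. Returns false if the Shard can't be located or the row was
 * not updated.
 */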
async updateOriginal(inputIn) {
const cas = inputIn.$cas;
const input = (cas === "skip-if-someone-else-changed-updating-ent-props" ||
cas instanceof Array
? {
...inputIn,
$cas: Object.fromEntries((cas instanceof Array ? cas : Object.keys(inputIn))
.filter((k) => !!this.constructor.SCHEMA.table[k])
.map((k) => [k, this[k] ?? null])),
}
: inputIn);
const [shard] = await (0, misc_1.join)([
this.constructor.SHARD_LOCATOR.singleShardFromID(types_1.ID, this[types_1.ID], "updateOriginal"),
this.vc.heartbeater.heartbeat(),
]);
if (!this.vc.isOmni()) {
await this.constructor.VALIDATION.validateUpdate(this.vc, this, input);
}
if (!shard) {
return false;
}
return this.constructor.TRIGGERS.wrapUpdate(async (input) => {
const updated = await shard.run(this.constructor.SCHEMA.update(this[types_1.ID], input), this.vc.toAnnotation(), this.vc.timeline(shard, this.constructor.SCHEMA.name), this.vc.freshness);
if (updated) {
this.vc.cache(Predicate_1.IDsCacheUpdatable).add(this[types_1.ID]);
await (0, misc_1.mapJoin)(this.constructor.INVERSES, async (inverse) => (0, misc_1.hasKey)(inverse.id2Field, input) &&
input[inverse.id2Field] !== undefined &&
inverse.afterUpdate(this.vc, input[inverse.id2Field], this[types_1.ID], this[inverse.id2Field]));
}
return updated;
}, this.vc, this, input);
}
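/**
 * Deletes the Ent's row from the DB, running privacy checks (unless the VC
 * is omni) and delete triggers, then removes the corresponding Inverses.
 * Returns false if the Shard can't be located or the row was not deleted.
 */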
async deleteOriginal() {
const [shard] = await (0, misc_1.join)([
this.constructor.SHARD_LOCATOR.singleShardFromID(types_1.ID, this[types_1.ID], "deleteOriginal"),
this.vc.heartbeater.heartbeat(),
]);
if (!this.vc.isOmni()) {
await this.constructor.VALIDATION.validateDelete(this.vc, this);
}
if (!shard) {
return false;
}
return this.constructor.TRIGGERS.wrapDelete(async () => {
const deleted = await shard.run(this.constructor.SCHEMA.delete(this[types_1.ID]), this.vc.toAnnotation(), this.vc.timeline(shard, this.constructor.SCHEMA.name), this.vc.freshness);
if (deleted) {
this.vc.cache(Predicate_1.IDsCacheUpdatable).add(this[types_1.ID]);
await (0, misc_1.mapJoin)(this.constructor.INVERSES, async (inverse) => inverse.afterDelete(this.vc, this[inverse.id2Field], this[types_1.ID]));
}
return deleted;
}, this.vc, this);
}
/**
* Since we disabled the constructor (to not let users call it manually and
* create fake Ents), we simulate its behavior here. This method is critical
* to performance, since the code normally loads LOTS of Ents.
*/
static async createEnt(vc, row) {
// If we've already created an Ent for this exact (row, VC, EntClass),
// return it. This covers a very frequent case when the same Ent is loaded
// multiple times concurrently from different places, so the DB query is
// coalesced into one load. We're coalescing the Ent too, which saves LOTS
// of CPU (otherwise spent in this.VALIDATION) and also lets memoized Ent
// methods work much more efficiently.
const creator = (0, fast_typescript_memoize_1.memoize2)(row, $CACHED_ENT, async (vc, _EntCls) => {
// Try to reduce permissions and freshness for the injected VC. Also turn
// the omni VC into a user-owning VC (or a guest). In most cases, this
// call is a no-op (we rarely upgrade/downgrade VCs).
const wasOmniVC = vc.isOmni();
vc = await this.createLowerVC(vc, row);
// Cloning is important here. Due to possible deduplication of exactly the
// same requests, the same row object can be returned twice, while we
// request it with two different VCs. We don't want to create two Ents
// sharing the same row storage if they have different VCs, so we clone.
const ent = Object.assign(Object.create(this.prototype), row);
Object.defineProperty(ent, "vc", {
value: vc,
enumerable: false, // to safely run JSON.stringify() on an Ent
writable: false,
});
if (!wasOmniVC) {
await this.VALIDATION.validateLoad(vc, ent);
}
return ent;
});
const ent = await creator(vc, this);
ent.vc.cache(Predicate_1.IDsCacheReadable).add(ent[types_1.ID]);
if (vc !== ent.vc) {
vc.cache(Predicate_1.IDsCacheReadable).add(ent[types_1.ID]);
}
return ent;
}
/**
* We never create an Ent with ent.vc = omni; instead, we lower permissions
* to either the Ent's owner (if tenantPrincipalField is used, or if the row
* has a field pointing to a VC) or to a guest VC.
*/
static async createLowerVC(vc, row) {
let newRowPrincipal;
if (vc.isOmni()) {
newRowPrincipal = null;
if (this.VALIDATION.tenantPrincipalField) {
newRowPrincipal = (row[this.VALIDATION.tenantPrincipalField] ??
null);
}
if (!newRowPrincipal) {
newRowPrincipal = (await this.VALIDATION.inferPrincipal(vc, row))
.principal;
}
}
else {
newRowPrincipal = vc.principal;
}
return vc.toLowerInternal(newRowPrincipal);
}
}
return PrimitiveMixin;
}
const $CACHED_ENT = Symbol("$CACHED_ENT");
//# sourceMappingURL=PrimitiveMixin.js.map