@clickup/ent-framework
A PostgreSQL graph-database-alike library with microsharding and row-level security
Client.js
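// Helpers emitted by the TypeScript compiler: __decorate applies decorators at
// runtime (used below to attach @Memoize to batcher()), and __importDefault
// normalizes default imports for CommonJS interop.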
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Client = void 0;
const fast_typescript_memoize_1 = require("fast-typescript-memoize");
const defaults_1 = __importDefault(require("lodash/defaults"));
const objectId_1 = require("../internal/objectId");
const Batcher_1 = require("./Batcher");
/**
 * Client is a Shard-name-aware abstraction which sends an actual query and
 * tracks the master/replica timeline. The concrete query sending implementation
 * (including required arguments) is up to the derived classes.
 */
class Client {
    /**
     * Calls swallowedErrorLogger() after amending the props (currently, it
     * prefixes `where` with the concrete class name and this Client's name).
     */
    logSwallowedError(props) {
        this.options.loggers?.swallowedErrorLogger({
            ...props,
            where: `${this.constructor.name}(${this.options.name}): ${props.where}`,
        });
    }
    /**
     * Initializes an instance of Client.
     */
    constructor(options) {
        this.options = (0, defaults_1.default)({}, options, Client.DEFAULT_OPTIONS);
    }
    /**
     * Batcher is per-Client per-query-type
     * per-table-name-and-shape-and-disableBatching:
     *
     * - Per-Client means that batchers are removed as soon as the Client is
     *   removed, i.e. the Client owns all the batchers for all tables.
     * - Per-query-type means that the batcher for a SELECT query is different
     *   from the batcher for an INSERT query (obviously).
     * - Per-table-name-and-shape-and-disableBatching means that each table has
     *   its own set of batchers (obviously). Also, some queries may be complex
     *   (like UPDATE), so the batcher also depends on the "shape" - the list of
     *   fields we're updating. Plus, for some inputs, we want to disable
     *   batching altogether - that produces a separate Batcher instance.
     *
     * Also, for every Batcher, there is exactly one Runner (which knows how to
     * build the actual query in the context of the current Client). Batchers are
     * generic (like DataLoader, but more general), and Runners are very custom to
     * the query (and are private to these queries).
     *
     * All that means that in a 1000-Shard 20-table Cluster we'll eventually have
     * 1000x20x8 Batchers/Runners (assuming we have 8 different operations).
     */
    batcher(_QueryClass, _schema, _additionalShape, disableBatching, runnerCreator) {
        // At the moment, Runner doesn't depend on the Client. So theoretically we
        // could share the same Runner across multiple Batchers (and multiple
        // Clients) to save memory (and inject the Client via Runner.run*()
        // methods). But we don't do all that right now.
        const runner = runnerCreator();
        return new Batcher_1.Batcher(runner, this.options.batchDelayMs, disableBatching);
    }
    /**
     * A convenience method to put connection prewarming logic in. The idea is to
     * keep the needed number of open connections and also, in each connection,
     * minimize the time the very first query will take (e.g. by pre-caching
     * full-text dictionaries).
     */
    prewarm() { }
}
exports.Client = Client;
/** Default values for the constructor options. */
Client.DEFAULT_OPTIONS = {
    shardNamer: null,
    loggers: null,
    batchDelayMs: 0,
};
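// Apply @Memoize to batcher() (see the doc comment above): the cache key joins
// the query class identity, the schema hash, the additional shape, and the
// disableBatching flag, so each distinct combination gets its own Batcher.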
__decorate([
    (0, fast_typescript_memoize_1.Memoize)((QueryClass, schema, additionalShape, disableBatching) => `${(0, objectId_1.objectId)(QueryClass)}:${schema.hash}:${additionalShape}:${disableBatching}`)
], Client.prototype, "batcher", null);
//# sourceMappingURL=Client.js.map
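// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of Client.js). It exercises only what is
// visible in this file; real derived classes (e.g. the library's PostgreSQL
// client) also implement the actual query sending. The subclass name, the
// option values, and the assumption that Client is re-exported from the
// package root are illustrative, not prescribed by the library.
// ---------------------------------------------------------------------------
const { Client } = require("@clickup/ent-framework");

class TraceClient extends Client {
    // Override the no-op prewarm() hook, e.g. to open connections eagerly.
    prewarm() {
        console.log(`prewarming ${this.options.name}`);
    }
}

const client = new TraceClient({
    name: "island0-replica1",
    loggers: { swallowedErrorLogger: (props) => console.warn(props.where) },
});
client.prewarm();
// The swallowed-error logger receives `where` prefixed with the class and
// client name, i.e. "TraceClient(island0-replica1): connect()".
client.logSwallowedError({ where: "connect()", error: new Error("timeout") });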