
@imqueue/pg-cache


PostgreSQL managed cache on Redis for @imqueue-based service methods

cacheBy.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.channelsOf = channelsOf; exports.cacheBy = cacheBy; const env_1 = require("./env"); const rpc_1 = require("@imqueue/rpc"); /** * Retrieves table names as channels from the given model and filter them by * a given fields map, if passed. Returns result as list of table names. * * @access private * @param {typeof Model} model * @param {any} [fields] * @param {string[]} [tables] * @return {string[]} */ function channelsOf(model, fields, tables = []) { const modelRels = model.associations; const relsMap = fields ? fields : model.associations; const rels = Object.keys(relsMap); const table = model.tableName; tables.push(table); for (const field of rels) { if (!modelRels[field]) { continue; } const relation = modelRels[field]; const { target, options } = relation; const through = options && options.through && options.through.model; const subFields = (fields || {})[field]; if (through && !~tables.indexOf(through.tableName)) { channelsOf(through, subFields, tables); } if (target && !~tables.indexOf(target.tableName)) { channelsOf(target, subFields, tables); } } return tables; } /** * Decorator factory @cacheBy(Model, CacheByOptions) * This decorator should be used on a service methods, to set the caching * rules for a method. Caching rules within this decorator are defined by a * passed model, which is treated as a root model of the call and it analyzes * cache invalidation based on passed runtime fields arguments, which * prevents unnecessary cache invalidations. So it is more intellectual way * to invalidate cache instead of any changes on described list of tables. */ function cacheBy(model, options) { const opts = options || {}; return (target, methodName, descriptor) => { const original = descriptor.value; const className = typeof target === 'function' ? target.name : target.constructor.name; const ttl = opts.ttl || env_1.DEFAULT_CACHE_TTL; const channels = channelsOf(model); target.pgCacheChannels = target.pgCacheChannels || {}; for (const channel of channels) { const pgChannel = target.pgCacheChannels[channel] = target.pgCacheChannels[channel] || []; pgChannel.push([methodName]); } descriptor.value = async function (...args) { const self = this || target; const cache = self.taggedCache; const logger = (self.logger || console); if (!cache) { (0, env_1.initError)(logger, className, String(methodName), cacheBy); return original.apply(self, args); } const fields = args[opts.fieldsArg]; const key = (0, rpc_1.signature)(className, methodName, args); try { let result = await cache.get(key); if (result === null || result === undefined) { result = original.apply(self, args); if (result && result.then) { result = await result; } const tags = channelsOf(model, fields).map(table => (0, rpc_1.signature)(className, methodName, [table])); cache.set(key, result, tags, ttl) .then(res => (0, env_1.setInfo)(logger, res, key, cacheBy)) .catch(err => (0, env_1.setError)(logger, err, key, cacheBy)); } return result; } catch (err) { (0, env_1.fetchError)(logger, err, key, cacheBy); return original.apply(self, args); } }; }; } //# sourceMappingURL=cacheBy.js.map