@trieb.work/nextjs-turbo-redis-cache

// src/RedisStringsHandler.ts
import { commandOptions, createClient } from "redis";

// src/utils/debug.ts
function debug(color = "none", ...args) {
  const colorCode = {
    red: "\x1B[31m",
    blue: "\x1B[34m",
    green: "\x1B[32m",
    yellow: "\x1B[33m",
    cyan: "\x1B[36m",
    white: "\x1B[37m",
    none: ""
  };
  if (process.env.DEBUG_CACHE_HANDLER) {
    console.log(colorCode[color], "DEBUG CACHE HANDLER: ", ...args);
  }
}
function debugVerbose(color, ...args) {
  if (process.env.DEBUG_CACHE_HANDLER_VERBOSE_VERBOSE) {
    console.log("\x1B[35m", "DEBUG SYNCED MAP: ", ...args);
  }
}

// src/SyncedMap.ts
var SYNC_CHANNEL_SUFFIX = ":sync-channel:";
// A local Map kept in sync across all running instances: entries are persisted in a Redis hash,
// changes are broadcast over a pub/sub channel, and expired/evicted Redis keys are removed via keyevent notifications.
var SyncedMap = class {
  constructor(options) {
    this.client = options.client;
    this.keyPrefix = options.keyPrefix;
    this.redisKey = options.redisKey;
    this.syncChannel = `${options.keyPrefix}${SYNC_CHANNEL_SUFFIX}${options.redisKey}`;
    this.database = options.database;
    this.querySize = options.querySize;
    this.filterKeys = options.filterKeys;
    this.resyncIntervalMs = options.resyncIntervalMs;
    this.customizedSync = options.customizedSync;
    this.map = /* @__PURE__ */ new Map();
    this.subscriberClient = this.client.duplicate();
    this.setupLock = new Promise((resolve) => {
      this.setupLockResolve = resolve;
    });
    this.setup().catch((error) => {
      console.error("Failed to setup SyncedMap:", error);
      throw error;
    });
  }
  async setup() {
    let setupPromises = [];
    if (!this.customizedSync?.withoutRedisHashmap) {
      setupPromises.push(this.initialSync());
      this.setupPeriodicResync();
    }
    setupPromises.push(this.setupPubSub());
    await Promise.all(setupPromises);
    this.setupLockResolve();
  }
  async initialSync() {
    let cursor = 0;
    const hScanOptions = { COUNT: this.querySize };
    try {
      do {
        const remoteItems = await redisErrorHandler(
          "SyncedMap.initialSync(), operation: hScan " + this.syncChannel + " " + this.keyPrefix + " " + this.redisKey + " " + cursor + " " + this.querySize,
          this.client.hScan(
            this.keyPrefix + this.redisKey,
            cursor,
            hScanOptions
          )
        );
        for (const { field, value } of remoteItems.tuples) {
          if (this.filterKeys(field)) {
            const parsedValue = JSON.parse(value);
            this.map.set(field, parsedValue);
          }
        }
        cursor = remoteItems.cursor;
      } while (cursor !== 0);
      await this.cleanupKeysNotInRedis();
    } catch (error) {
      console.error("Error during initial sync:", error);
      throw error;
    }
  }
  async cleanupKeysNotInRedis() {
    let cursor = 0;
    const scanOptions = { COUNT: this.querySize, MATCH: `${this.keyPrefix}*` };
    let remoteKeys = [];
    try {
      do {
        const remoteKeysPortion = await redisErrorHandler(
          "SyncedMap.cleanupKeysNotInRedis(), operation: scan " + this.keyPrefix,
          this.client.scan(cursor, scanOptions)
        );
        remoteKeys = remoteKeys.concat(remoteKeysPortion.keys);
        cursor = remoteKeysPortion.cursor;
      } while (cursor !== 0);
      const remoteKeysSet = new Set(
        remoteKeys.map((key) => key.substring(this.keyPrefix.length))
      );
      const keysToDelete = [];
      for (const key of this.map.keys()) {
        const keyStr = key;
        if (!remoteKeysSet.has(keyStr) && this.filterKeys(keyStr)) {
          keysToDelete.push(keyStr);
        }
      }
      if (keysToDelete.length > 0) {
        await this.delete(keysToDelete);
      }
    } catch (error) {
      console.error("Error during cleanup of keys not in Redis:", error);
      throw error;
    }
  }
  setupPeriodicResync() {
    if (this.resyncIntervalMs && this.resyncIntervalMs > 0) {
      setInterval(() => {
        this.initialSync().catch((error) => {
          console.error("Error during periodic resync:", error);
        });
      }, this.resyncIntervalMs);
    }
  }
  async setupPubSub() {
    const syncHandler = async (message) => {
      const syncMessage = JSON.parse(message);
      if (syncMessage.type === "insert") {
        if (syncMessage.key !== void 0 && syncMessage.value !== void 0) {
          this.map.set(syncMessage.key, syncMessage.value);
        }
      } else if (syncMessage.type === "delete") {
        if (syncMessage.keys) {
          for (const key of syncMessage.keys) {
            this.map.delete(key);
          }
        }
      }
    };
    const keyEventHandler = async (key, message) => {
      debug(
        "yellow",
        "SyncedMap.keyEventHandler() called with message",
        this.redisKey,
        message,
        key
      );
      if (key.startsWith(this.keyPrefix)) {
        const keyInMap = key.substring(this.keyPrefix.length);
        if (this.filterKeys(keyInMap)) {
          debugVerbose(
            "SyncedMap.keyEventHandler() key matches filter and will be deleted",
            this.redisKey,
            message,
            key
          );
          await this.delete(keyInMap, true);
        }
      } else {
        debugVerbose(
          "SyncedMap.keyEventHandler() key does not have prefix",
          this.redisKey,
          message,
          key
        );
      }
    };
    try {
      await this.subscriberClient.connect().catch(async () => {
        console.error("Failed to connect subscriber client. Retrying...");
        await this.subscriberClient.connect().catch((error) => {
          console.error("Failed to connect subscriber client.", error);
          throw error;
        });
      });
      if ((process.env.SKIP_KEYSPACE_CONFIG_CHECK || "").toUpperCase() !== "TRUE") {
        const keyspaceEventConfig = (await this.subscriberClient.configGet("notify-keyspace-events"))?.["notify-keyspace-events"];
        if (!keyspaceEventConfig.includes("E")) {
          throw new Error(
            'Keyspace event configuration is set to "' + keyspaceEventConfig + "\" but has to include 'E' for Keyevent events, published with __keyevent@<db>__ prefix. We recommend to set it to 'Exe' like so `redis-cli -h localhost config set notify-keyspace-events Exe`"
          );
        }
        if (!keyspaceEventConfig.includes("A") && !(keyspaceEventConfig.includes("x") && keyspaceEventConfig.includes("e"))) {
          throw new Error(
            'Keyspace event configuration is set to "' + keyspaceEventConfig + "\" but has to include 'A' or 'x' and 'e' for expired and evicted events. We recommend to set it to 'Exe' like so `redis-cli -h localhost config set notify-keyspace-events Exe`"
          );
        }
      }
      await Promise.all([
        // We use a custom channel for insert/delete For the following reason:
        // With custom channel we can delete multiple entries in one message. If we would listen to unlink / del we
        // could get thousands of messages for one revalidateTag (For example revalidateTag("algolia") would send an enormous amount of network packages)
        // Also we can send the value in the message for insert
        this.subscriberClient.subscribe(this.syncChannel, syncHandler),
        // Subscribe to Redis keyevent notifications for evicted and expired keys
        this.subscriberClient.subscribe(
          `__keyevent@${this.database}__:evicted`,
          keyEventHandler
        ),
        this.subscriberClient.subscribe(
          `__keyevent@${this.database}__:expired`,
          keyEventHandler
        )
      ]);
      this.subscriberClient.on("error", async (err) => {
        console.error("Subscriber client error:", err);
        try {
          await this.subscriberClient.quit();
          this.subscriberClient = this.client.duplicate();
          await this.setupPubSub();
        } catch (reconnectError) {
          console.error(
            "Failed to reconnect subscriber client:",
            reconnectError
          );
        }
      });
    } catch (error) {
      console.error("Error setting up pub/sub client:", error);
      throw error;
    }
  }
  async waitUntilReady() {
    await this.setupLock;
  }
  get(key) {
    debugVerbose(
      "SyncedMap.get() called with key",
      key,
      JSON.stringify(this.map.get(key))?.substring(0, 100)
    );
    return this.map.get(key);
  }
  async set(key, value) {
    debugVerbose(
      "SyncedMap.set() called with key",
      key,
      JSON.stringify(value)?.substring(0, 100)
    );
    this.map.set(key, value);
    const operations = [];
    if (this.customizedSync?.withoutSetSync) {
      return;
    }
    if (!this.customizedSync?.withoutRedisHashmap) {
      operations.push(
        redisErrorHandler(
          "SyncedMap.set(), operation: hSet " + this.syncChannel + " " + this.keyPrefix + " " + key,
          this.client.hSet(
            this.keyPrefix + this.redisKey,
            key,
            JSON.stringify(value)
          )
        )
      );
    }
    const insertMessage = { type: "insert", key, value };
    operations.push(
      redisErrorHandler(
        "SyncedMap.set(), operation: publish " + this.syncChannel + " " + this.keyPrefix + " " + key,
        this.client.publish(this.syncChannel, JSON.stringify(insertMessage))
      )
    );
    await Promise.all(operations);
  }
  async delete(keys, withoutSyncMessage = false) {
    debugVerbose(
      "SyncedMap.delete() called with keys",
      this.redisKey,
      keys,
      withoutSyncMessage
    );
    const keysArray = Array.isArray(keys) ? keys : [keys];
    const operations = [];
    for (const key of keysArray) {
      this.map.delete(key);
    }
    if (!this.customizedSync?.withoutRedisHashmap) {
      operations.push(
        redisErrorHandler(
          "SyncedMap.delete(), operation: hDel " + this.syncChannel + " " + this.keyPrefix + " " + this.redisKey + " " + keysArray,
          this.client.hDel(this.keyPrefix + this.redisKey, keysArray)
        )
      );
    }
    if (!withoutSyncMessage) {
      const deletionMessage = { type: "delete", keys: keysArray };
      operations.push(
        redisErrorHandler(
          "SyncedMap.delete(), operation: publish " + this.syncChannel + " " + this.keyPrefix + " " + keysArray,
          this.client.publish(
            this.syncChannel,
            JSON.stringify(deletionMessage)
          )
        )
      );
    }
    await Promise.all(operations);
    debugVerbose(
      "SyncedMap.delete() finished operations",
      this.redisKey,
      keys,
      operations.length
    );
  }
  has(key) {
    return this.map.has(key);
  }
  entries() {
    return this.map.entries();
  }
};

// src/DeduplicatedRequestHandler.ts
// Collapses concurrent calls for the same key into a single in-flight promise and keeps the
// result in memory for a short time; used to deduplicate Redis GETs for the same cache key.
var DeduplicatedRequestHandler = class {
  constructor(fn, cachingTimeMs, inMemoryDeduplicationCache) {
    // Method to handle deduplicated requests
    this.deduplicatedFunction = (key) => {
      debugVerbose(
        "DeduplicatedRequestHandler.deduplicatedFunction() called with",
        key
      );
      const self = this;
      const dedupedFn = async (...args) => {
        debugVerbose(
          "DeduplicatedRequestHandler.deduplicatedFunction().dedupedFn called with",
          key
        );
        if (self.inMemoryDeduplicationCache && self.inMemoryDeduplicationCache.has(key)) {
          debugVerbose(
            "DeduplicatedRequestHandler.deduplicatedFunction().dedupedFn ",
            key,
            "found key in inMemoryDeduplicationCache"
          );
          const res = await self.inMemoryDeduplicationCache.get(key).then((v) => structuredClone(v));
          debugVerbose(
            "DeduplicatedRequestHandler.deduplicatedFunction().dedupedFn ",
            key,
            "found key in inMemoryDeduplicationCache and served result from there",
            JSON.stringify(res).substring(0, 200)
          );
          return res;
        }
        const promise = self.fn(...args);
        self.inMemoryDeduplicationCache.set(key, promise);
        debugVerbose(
          "DeduplicatedRequestHandler.deduplicatedFunction().dedupedFn ",
          key,
          "did not find key in inMemoryDeduplicationCache. Setting it now and waiting for promise to resolve"
        );
        try {
          const ts = performance.now();
          const result = await promise;
          debugVerbose(
            "DeduplicatedRequestHandler.deduplicatedFunction().dedupedFn ",
            key,
            "promise resolved (in ",
            performance.now() - ts,
            "ms). Returning result",
            JSON.stringify(result).substring(0, 200)
          );
          return structuredClone(result);
        } finally {
          setTimeout(() => {
            debugVerbose(
              "DeduplicatedRequestHandler.deduplicatedFunction().dedupedFn ",
              key,
              "deleting key from inMemoryDeduplicationCache after ",
              self.cachingTimeMs,
              "ms"
            );
            self.inMemoryDeduplicationCache.delete(key);
          }, self.cachingTimeMs);
        }
      };
      return dedupedFn;
    };
    this.fn = fn;
    this.cachingTimeMs = cachingTimeMs;
    this.inMemoryDeduplicationCache = inMemoryDeduplicationCache;
  }
  // Method to manually seed a result into the cache
  seedRequestReturn(key, value) {
    const resultPromise = new Promise((res) => res(value));
    this.inMemoryDeduplicationCache.set(key, resultPromise);
    debugVerbose(
      "DeduplicatedRequestHandler.seedRequestReturn() seeded result ",
      key,
      value.substring(0, 200)
    );
    setTimeout(() => {
      this.inMemoryDeduplicationCache.delete(key);
    }, this.cachingTimeMs);
  }
};

// src/utils/json.ts
function bufferAndMapReviver(_, value) {
  if (value && typeof value === "object" && typeof value.$binary === "string") {
    return Buffer.from(value.$binary, "base64");
  }
  if (value && typeof value === "object" && typeof value.$map === "object" && !!value.$map) {
    return new Map(
      Object.entries(value.$map).map(([key, value2]) => {
        const revivedValue = bufferAndMapReviver("", value2);
        return [key, revivedValue];
      })
    );
  }
  return value;
}
function bufferAndMapReplacer(_, value) {
  if (Buffer.isBuffer(value)) {
    return { $binary: value.toString("base64") };
  }
  if (value && typeof value === "object" && value?.type === "Buffer" && Array.isArray(value.data)) {
    return { $binary: Buffer.from(value.data).toString("base64") };
  }
  if (value && typeof value === "object" && value instanceof Map) {
    return {
      $map: Object.fromEntries(
        Array.from(value.entries()).map(([key, value2]) => {
          const replacedValue = bufferAndMapReplacer("", value2);
          return [key, replacedValue];
        })
      )
    };
  }
  return value;
}

// src/RedisStringsHandler.ts
function redisErrorHandler(debugInfo, redisCommandResult) {
  const beforeTimestamp = performance.now();
  return redisCommandResult.catch((error) => {
    console.error(
      "Redis command error",
      (performance.now() - beforeTimestamp).toFixed(2),
      "ms",
      debugInfo,
      error
    );
    throw error;
  });
}
if (process.env.DEBUG_CACHE_HANDLER) {
  setInterval(() => {
    const start = performance.now();
    setImmediate(() => {
      const duration = performance.now() - start;
      if (duration > 100) {
        debug(
          "yellow",
          `RedisStringsHandler detected an event loop lag of: ${duration.toFixed(2)}ms. If your container is hosted in a cloud provider with container suspension this is normal. If not you should increase the CPU of your container.`
        );
      }
    });
  }, 500);
}
var NEXT_CACHE_IMPLICIT_TAG_ID = "_N_T_";
var REVALIDATED_TAGS_KEY = "__revalidated_tags__";
var killContainerOnErrorCount = 0;
// The actual cache handler: stores serialized Next.js cache entries as Redis strings and tracks
// tag-to-key mappings in SyncedMaps so revalidateTag() can unlink all affected keys.
var RedisStringsHandler = class {
  constructor({
    redisUrl = process.env.REDIS_URL ? process.env.REDIS_URL : process.env.REDISHOST ? `redis://${process.env.REDISHOST}:${process.env.REDISPORT}` : "redis://localhost:6379",
    database = process.env.VERCEL_ENV === "production" ? 0 : 1,
    keyPrefix = process.env.VERCEL_URL || "UNDEFINED_URL_",
    sharedTagsKey = "__sharedTags__",
    getTimeoutMs = process.env.REDIS_COMMAND_TIMEOUT_MS ? Number.parseInt(process.env.REDIS_COMMAND_TIMEOUT_MS) ?? 500 : 500,
    revalidateTagQuerySize = 250,
    avgResyncIntervalMs = 60 * 60 * 1e3,
    redisGetDeduplication = true,
    inMemoryCachingTime = 1e4,
    defaultStaleAge = 60 * 60 * 24 * 14,
    estimateExpireAge = (staleAge) => process.env.VERCEL_ENV === "production" ? staleAge * 2 : staleAge * 1.2,
    killContainerOnErrorThreshold = process.env.KILL_CONTAINER_ON_ERROR_THRESHOLD ? Number.parseInt(process.env.KILL_CONTAINER_ON_ERROR_THRESHOLD) ?? 0 : 0,
    socketOptions,
    clientOptions
  }) {
    this.clientReadyCalls = 0;
    try {
      this.keyPrefix = keyPrefix;
      this.redisGetDeduplication = redisGetDeduplication;
      this.inMemoryCachingTime = inMemoryCachingTime;
      this.defaultStaleAge = defaultStaleAge;
      this.estimateExpireAge = estimateExpireAge;
      this.killContainerOnErrorThreshold = killContainerOnErrorThreshold;
      this.getTimeoutMs = getTimeoutMs;
      try {
        this.client = createClient({
          url: redisUrl,
          pingInterval: 1e4,
          // Useful with Redis deployments that do not use TCP Keep-Alive. Restarts the connection if it is idle for too long.
          ...database !== 0 ? { database } : {},
          ...socketOptions ? { socket: { ...socketOptions } } : {},
          ...clientOptions || {}
        });
        this.client.on("error", (error) => {
          console.error(
            "Redis client error",
            error,
            killContainerOnErrorCount++
          );
          setTimeout(
            () => this.client.connect().catch((error2) => {
              console.error(
                "Failed to reconnect Redis client after connection loss:",
                error2
              );
            }),
            1e3
          );
          if (this.killContainerOnErrorThreshold > 0 && killContainerOnErrorCount >= this.killContainerOnErrorThreshold) {
            console.error(
              "Redis client error threshold reached, disconnecting and exiting (please implement a restart process/container watchdog to handle this error)",
              error,
              killContainerOnErrorCount++
            );
            this.client.disconnect();
            this.client.quit();
            setTimeout(() => {
              process.exit(1);
            }, 500);
          }
        });
        this.client.connect().then(() => {
          debug("green", "Redis client connected.");
        }).catch(() => {
          this.client.connect().catch((error) => {
            console.error("Failed to connect Redis client:", error);
            this.client.disconnect();
            throw error;
          });
        });
      } catch (error) {
        console.error("Failed to initialize Redis client");
        throw error;
      }
      const filterKeys = (key) => key !== REVALIDATED_TAGS_KEY && key !== sharedTagsKey;
      this.sharedTagsMap = new SyncedMap({
        client: this.client,
        keyPrefix,
        redisKey: sharedTagsKey,
        database,
        querySize: revalidateTagQuerySize,
        filterKeys,
        resyncIntervalMs: avgResyncIntervalMs - avgResyncIntervalMs / 10 + Math.random() * (avgResyncIntervalMs / 10)
      });
      this.revalidatedTagsMap = new SyncedMap({
        client: this.client,
        keyPrefix,
        redisKey: REVALIDATED_TAGS_KEY,
        database,
        querySize: revalidateTagQuerySize,
        filterKeys,
        resyncIntervalMs: avgResyncIntervalMs + avgResyncIntervalMs / 10 + Math.random() * (avgResyncIntervalMs / 10)
      });
      this.inMemoryDeduplicationCache = new SyncedMap({
        client: this.client,
        keyPrefix,
        redisKey: "inMemoryDeduplicationCache",
        database,
        querySize: revalidateTagQuerySize,
        filterKeys,
        customizedSync: {
          withoutRedisHashmap: true,
          withoutSetSync: true
        }
      });
      const redisGet = this.client.get.bind(this.client);
      this.redisDeduplicationHandler = new DeduplicatedRequestHandler(
        redisGet,
        inMemoryCachingTime,
        this.inMemoryDeduplicationCache
      );
      this.redisGet = redisGet;
      this.deduplicatedRedisGet = this.redisDeduplicationHandler.deduplicatedFunction;
    } catch (error) {
      console.error(
        "RedisStringsHandler constructor error",
        error,
        killContainerOnErrorCount++
      );
      if (killContainerOnErrorThreshold > 0 && killContainerOnErrorCount >= killContainerOnErrorThreshold) {
        console.error(
          "RedisStringsHandler constructor error threshold reached, disconnecting and exiting (please implement a restart process/container watchdog to handle this error)",
          error,
          killContainerOnErrorCount++
        );
        process.exit(1);
      }
      throw error;
    }
  }
  resetRequestCache() {
  }
  async assertClientIsReady() {
    if (this.clientReadyCalls > 10) {
      throw new Error(
        "assertClientIsReady called more than 10 times without being ready."
      );
    }
    await Promise.race([
      Promise.all([
        this.sharedTagsMap.waitUntilReady(),
        this.revalidatedTagsMap.waitUntilReady()
      ]),
      new Promise(
        (_, reject) => setTimeout(() => {
          reject(
            new Error(
              "assertClientIsReady: Timeout waiting for Redis maps to be ready"
            )
          );
        }, 3e4)
      )
    ]);
    this.clientReadyCalls = 0;
    if (!this.client.isReady) {
      throw new Error(
        "assertClientIsReady: Redis client is not ready yet or connection is lost."
      );
    }
  }
  async get(key, ctx) {
    try {
      if (ctx.kind !== "APP_ROUTE" && ctx.kind !== "APP_PAGE" && ctx.kind !== "FETCH") {
        console.warn(
          "RedisStringsHandler.get() called with",
          key,
          ctx,
          " this cache handler is only designed and tested for kind APP_ROUTE and APP_PAGE and not for kind ",
          ctx?.kind
        );
      }
      debug("green", "RedisStringsHandler.get() called with", key, ctx);
      await this.assertClientIsReady();
      const clientGet = this.redisGetDeduplication ? this.deduplicatedRedisGet(key) : this.redisGet;
      const serializedCacheEntry = await redisErrorHandler(
        "RedisStringsHandler.get(), operation: get" + (this.redisGetDeduplication ? "deduplicated" : "") + " " + this.getTimeoutMs + "ms " + this.keyPrefix + " " + key,
        clientGet(
          commandOptions({ signal: AbortSignal.timeout(this.getTimeoutMs) }),
          this.keyPrefix + key
        )
      );
      debug(
        "green",
        "RedisStringsHandler.get() finished with result (serializedCacheEntry)",
        serializedCacheEntry?.substring(0, 200)
      );
      if (!serializedCacheEntry) {
        return null;
      }
      const cacheEntry = JSON.parse(
        serializedCacheEntry,
        bufferAndMapReviver
      );
      debug(
        "green",
        "RedisStringsHandler.get() finished with result (cacheEntry)",
        JSON.stringify(cacheEntry).substring(0, 200)
      );
      if (!cacheEntry) {
        return null;
      }
      if (!cacheEntry?.tags) {
        console.warn(
          "RedisStringsHandler.get() called with",
          key,
          ctx,
          "cacheEntry is malformed (missing tags)"
        );
      }
      if (!cacheEntry?.value) {
        console.warn(
          "RedisStringsHandler.get() called with",
          key,
          ctx,
          "cacheEntry is malformed (missing value)"
        );
      }
      if (!cacheEntry?.lastModified) {
        console.warn(
          "RedisStringsHandler.get() called with",
          key,
          ctx,
          "cacheEntry is malformed (missing lastModified)"
        );
      }
      if (ctx.kind === "FETCH") {
        const combinedTags = /* @__PURE__ */ new Set([
          ...ctx?.softTags || [],
          ...ctx?.tags || []
        ]);
        if (combinedTags.size === 0) {
          return cacheEntry;
        }
        for (const tag of combinedTags) {
          const revalidationTime = this.revalidatedTagsMap.get(tag);
          if (revalidationTime && revalidationTime > cacheEntry.lastModified) {
            const redisKey = this.keyPrefix + key;
            this.client.unlink(redisKey).catch((err) => {
              console.error(
                "Error occurred while unlinking stale data. Error was:",
                err
              );
            }).finally(async () => {
              await this.sharedTagsMap.delete(key);
              await this.revalidatedTagsMap.delete(tag);
            });
            debug(
              "green",
              'RedisStringsHandler.get() found revalidation time for tag. Cache entry is stale and will be deleted and "null" will be returned.',
              tag,
              redisKey,
              revalidationTime,
              cacheEntry
            );
            return null;
          }
        }
      }
      return cacheEntry;
    } catch (error) {
      console.error(
        "RedisStringsHandler.get() Error occurred while getting cache entry. Returning null so site can continue to serve content while cache is disabled. The original error was:",
        error,
        killContainerOnErrorCount++
      );
      if (this.killContainerOnErrorThreshold > 0 && killContainerOnErrorCount >= this.killContainerOnErrorThreshold) {
        console.error(
          "RedisStringsHandler get() error threshold reached, disconnecting and exiting (please implement a restart process/container watchdog to handle this error)",
          error,
          killContainerOnErrorCount
        );
        this.client.disconnect();
        this.client.quit();
        setTimeout(() => {
          process.exit(1);
        }, 500);
      }
      return null;
    }
  }
  async set(key, data, ctx) {
    try {
      if (data.kind !== "APP_ROUTE" && data.kind !== "APP_PAGE" && data.kind !== "FETCH") {
        console.warn(
          "RedisStringsHandler.set() called with",
          key,
          ctx,
          data,
          " this cache handler is only designed and tested for kind APP_ROUTE and APP_PAGE and not for kind ",
          data?.kind
        );
      }
      await this.assertClientIsReady();
      if (data.kind === "APP_PAGE" || data.kind === "APP_ROUTE") {
        const tags = data.headers["x-next-cache-tags"]?.split(",");
        ctx.tags = [...ctx.tags || [], ...tags || []];
      }
      const cacheEntry = {
        lastModified: Date.now(),
        tags: ctx?.tags || [],
        value: data
      };
      const serializedCacheEntry = JSON.stringify(
        cacheEntry,
        bufferAndMapReplacer
      );
      if (this.redisGetDeduplication) {
        this.redisDeduplicationHandler.seedRequestReturn(
          key,
          serializedCacheEntry
        );
      }
      const revalidate = (
        // For fetch requests in newest versions, the revalidate context property is never used, and instead the revalidate property of the passed-in data is used
        data.kind === "FETCH" && data.revalidate || ctx.revalidate || ctx.cacheControl?.revalidate || data?.revalidate
      );
      const expireAt = revalidate && Number.isSafeInteger(revalidate) && revalidate > 0 ? this.estimateExpireAge(revalidate) : this.estimateExpireAge(this.defaultStaleAge);
      const setOperation = redisErrorHandler(
        "RedisStringsHandler.set(), operation: set " + this.keyPrefix + " " + key,
        this.client.set(this.keyPrefix + key, serializedCacheEntry, {
          EX: expireAt
        })
      );
      debug(
        "blue",
        "RedisStringsHandler.set() will set the following serializedCacheEntry",
        this.keyPrefix,
        key,
        data,
        ctx,
        serializedCacheEntry?.substring(0, 200),
        expireAt
      );
      let setTagsOperation;
      if (ctx.tags && ctx.tags.length > 0) {
        const currentTags = this.sharedTagsMap.get(key);
        const currentIsSameAsNew = currentTags?.length === ctx.tags.length && currentTags.every((v) => ctx.tags.includes(v)) && ctx.tags.every((v) => currentTags.includes(v));
        if (!currentIsSameAsNew) {
          setTagsOperation = this.sharedTagsMap.set(
            key,
            structuredClone(ctx.tags)
          );
        }
      }
      debug(
        "blue",
        "RedisStringsHandler.set() will set the following sharedTagsMap",
        key,
        ctx.tags
      );
      await Promise.all([setOperation, setTagsOperation]);
    } catch (error) {
      console.error(
        "RedisStringsHandler.set() Error occurred while setting cache entry. The original error was:",
        error,
        killContainerOnErrorCount++
      );
      if (this.killContainerOnErrorThreshold > 0 && killContainerOnErrorCount >= this.killContainerOnErrorThreshold) {
        console.error(
          "RedisStringsHandler set() error threshold reached, disconnecting and exiting (please implement a restart process/container watchdog to handle this error)",
          error,
          killContainerOnErrorCount
        );
        this.client.disconnect();
        this.client.quit();
        setTimeout(() => {
          process.exit(1);
        }, 500);
      }
      throw error;
    }
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async revalidateTag(tagOrTags, ...rest) {
    try {
      debug(
        "red",
        "RedisStringsHandler.revalidateTag() called with",
        tagOrTags,
        rest
      );
      const tags = new Set([tagOrTags || []].flat());
      await this.assertClientIsReady();
      const keysToDelete = /* @__PURE__ */ new Set();
      for (const tag of tags) {
        if (tag.startsWith(NEXT_CACHE_IMPLICIT_TAG_ID)) {
          const now = Date.now();
          debug(
            "red",
            "RedisStringsHandler.revalidateTag() set revalidation time for tag",
            tag,
            "to",
            now
          );
          await this.revalidatedTagsMap.set(tag, now);
        }
      }
      for (const [key, sharedTags] of this.sharedTagsMap.entries()) {
        if (sharedTags.some((tag) => tags.has(tag))) {
          keysToDelete.add(key);
        }
      }
      debug(
        "red",
        "RedisStringsHandler.revalidateTag() found",
        keysToDelete,
        "keys to delete"
      );
      if (keysToDelete.size === 0) {
        return;
      }
      const redisKeys = Array.from(keysToDelete);
      const fullRedisKeys = redisKeys.map((key) => this.keyPrefix + key);
      const deleteKeysOperation = redisErrorHandler(
        "RedisStringsHandler.revalidateTag(), operation: unlink " + this.keyPrefix + " " + fullRedisKeys,
        this.client.unlink(fullRedisKeys)
      );
      if (this.redisGetDeduplication && this.inMemoryCachingTime > 0) {
        for (const key of keysToDelete) {
          this.inMemoryDeduplicationCache.delete(key);
        }
      }
      const deleteTagsOperation = this.sharedTagsMap.delete(redisKeys);
      await Promise.all([deleteKeysOperation, deleteTagsOperation]);
      debug(
        "red",
        "RedisStringsHandler.revalidateTag() finished delete operations"
      );
    } catch (error) {
      console.error(
        "RedisStringsHandler.revalidateTag() Error occurred while revalidating tags. The original error was:",
        error,
        killContainerOnErrorCount++
      );
      if (this.killContainerOnErrorThreshold > 0 && killContainerOnErrorCount >= this.killContainerOnErrorThreshold) {
        console.error(
          "RedisStringsHandler revalidateTag() error threshold reached, disconnecting and exiting (please implement a restart process/container watchdog to handle this error)",
          error,
          killContainerOnErrorCount
        );
        this.client.disconnect();
        this.client.quit();
        setTimeout(() => {
          process.exit(1);
        }, 500);
      }
      throw error;
    }
  }
};

// src/CachedHandler.ts
var cachedHandler;
// Thin singleton wrapper exported as the default cache handler class for Next.js;
// every instance delegates to one shared RedisStringsHandler per process.
var CachedHandler = class {
  constructor(options) {
    if (!cachedHandler) {
      console.log("created cached handler");
      cachedHandler = new RedisStringsHandler(options);
    }
  }
  get(...args) {
    debugVerbose("CachedHandler.get called with", args);
    return cachedHandler.get(...args);
  }
  set(...args) {
    debugVerbose("CachedHandler.set called with", args);
    return cachedHandler.set(...args);
  }
  revalidateTag(...args) {
    debugVerbose("CachedHandler.revalidateTag called with", args);
    return cachedHandler.revalidateTag(...args);
  }
  resetRequestCache(...args) {
    return cachedHandler.resetRequestCache(...args);
  }
};

// src/index.ts
var index_default = CachedHandler;
export { RedisStringsHandler, index_default as default };
//# sourceMappingURL=index.mjs.map
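
// ---------------------------------------------------------------------------
// Usage sketch (not part of the bundle above). The file names and the Next.js
// `cacheHandler` / `cacheMaxMemorySize` options below are assumptions based on
// Next.js' public custom-cache-handler configuration, not something this
// bundle prescribes; adapt them to your project:
//
//   // cache-handler.mjs — re-export the default CachedHandler class
//   export { default } from "@trieb.work/nextjs-turbo-redis-cache";
//
//   // next.config.js — point Next.js at the handler and disable its default
//   // in-memory cache so Redis stays the single source of truth
//   module.exports = {
//     cacheHandler: require.resolve("./cache-handler.mjs"),
//     cacheMaxMemorySize: 0,
//   };
//
// As the constructor defaults above show, the handler reads REDIS_URL (or
// REDISHOST/REDISPORT) for its connection string and VERCEL_URL for its key
// prefix, and verbose logging can be enabled via DEBUG_CACHE_HANDLER.
// ---------------------------------------------------------------------------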