create-questpie-app

Bootstrap a new QUESTPIE Studio application with your choice of template and packages

622 lines (535 loc) 17.9 kB
import Redis from 'ioredis'
import { promises as fs } from 'node:fs'
import { join } from 'node:path'
import { tmpdir } from 'node:os'
import { PHASE_PRODUCTION_BUILD } from 'next/constants.js'

// Global cache configuration - singleton pattern to avoid multiple instantiations
function initializeCacheConfig(options = {}) {
  if (global.cacheHandlerConfig) {
    return global.cacheHandlerConfig
  }

  global.cacheHandlerConfig = {
    options,
    client: null,
    memoryCache: new Map(),
    requestCache: new Map(),
    isRedisConnected: false,
    isFilesystemEnabled: true,
    cacheDir: options?.cacheDir || join(tmpdir(), 'nextjs-cache'),
    maxMemoryCacheSize: options?.maxMemoryCacheSize || 1000,
    defaultTTL: options?.defaultTTL || 60 * 60 * 24, // 24 hours
    // Connection status tracking
    connectionAttempts: 0,
    maxConnectionAttempts: 3,
    isConnecting: false,
  }

  // Only initialize cache systems if we're not in build phase
  if (process.env.NEXT_PHASE !== PHASE_PRODUCTION_BUILD) {
    initializeFilesystemCache()
    initRedisConnection()
  }

  return global.cacheHandlerConfig
}

/**
 * Initialize filesystem cache directory
 */
async function initializeFilesystemCache() {
  const config = global.cacheHandlerConfig
  if (!config) return

  try {
    await fs.mkdir(config.cacheDir, { recursive: true })
    await fs.mkdir(join(config.cacheDir, 'tags'), { recursive: true })
    // Filesystem cache initialized successfully
  } catch (error) {
    console.error('Failed to initialize filesystem cache:', error)
    config.isFilesystemEnabled = false
  }
}

/**
 * Initialize Redis connection with retry logic
 */
async function initRedisConnection() {
  const config = global.cacheHandlerConfig
  if (!config) return

  if (config.isConnecting || config.connectionAttempts >= config.maxConnectionAttempts) {
    return
  }

  config.isConnecting = true
  config.connectionAttempts++

  try {
    const connectionOptions = createConnectionOptions()

    // Skip Redis initialization if no configuration is provided
    if (!connectionOptions) {
      // No Redis configuration found, using memory cache only
      config.isConnecting = false
      return
    }

    config.client = new Redis(connectionOptions)

    config.client.on('error', (err) => {
      console.error('Redis cache handler error:', err)
      config.isRedisConnected = false
    })

    config.client.on('connect', () => {
      config.isRedisConnected = true
      config.connectionAttempts = 0 // Reset on successful connection
      // Redis cache handler connected
    })

    config.client.on('ready', () => {
      config.isRedisConnected = true
    })

    config.client.on('close', () => {
      config.isRedisConnected = false
      // Redis cache handler disconnected
    })

    // Test connection
    await config.client.ping()
  } catch (error) {
    console.error(`Redis connection attempt ${config.connectionAttempts} failed:`, error)
    config.isRedisConnected = false

    if (config.client) {
      config.client.disconnect()
      config.client = null
    }

    // Retry connection after delay if we haven't exceeded max attempts
    if (config.connectionAttempts < config.maxConnectionAttempts) {
      setTimeout(
        () => {
          config.isConnecting = false
          initRedisConnection()
        },
        2 ** config.connectionAttempts * 1000,
      ) // Exponential backoff
    } else {
      // Max Redis connection attempts exceeded, falling back to memory cache
    }
  } finally {
    config.isConnecting = false
  }
}

/**
 * Generate filesystem path for cache key
 */
function getFilesystemPath(key) {
  const config = global.cacheHandlerConfig
  if (!config) return null

  // Use a simple hash to create a file path to avoid filesystem issues with special characters
  const hash = Buffer.from(key).toString('base64').replace(/[/+=]/g, '_')
  return join(config.cacheDir, `${hash}.json`)
}

/**
 * Generate filesystem path for tag
 */
function getTagPath(tag) {
  const config = global.cacheHandlerConfig
  if (!config) return null

  const hash = Buffer.from(tag).toString('base64').replace(/[/+=]/g, '_')
  return join(config.cacheDir, 'tags', `${hash}.json`)
}

/**
 * Read cache data from filesystem (only used when Redis is down)
 */
async function readFromFilesystem(key) {
  const config = global.cacheHandlerConfig
  if (!config || !config.isFilesystemEnabled) return null

  try {
    const filePath = getFilesystemPath(key)
    if (!filePath) return null

    const data = await fs.readFile(filePath, 'utf8')
    const parsed = JSON.parse(data)

    // Check if data is expired
    if (parsed.expiresAt && Date.now() > parsed.expiresAt) {
      await deleteFromFilesystem(key)
      return null
    }

    return parsed
  } catch (error) {
    // File doesn't exist or is corrupted
    return null
  }
}

/**
 * Write cache data to filesystem (only used when Redis is down)
 */
async function writeToFilesystem(key, data, ttlSeconds) {
  const config = global.cacheHandlerConfig
  if (!config || !config.isFilesystemEnabled) return

  try {
    const filePath = getFilesystemPath(key)
    if (!filePath) return

    const cacheData = {
      ...data,
      expiresAt: Date.now() + (ttlSeconds || config.defaultTTL) * 1000,
    }

    await fs.writeFile(filePath, JSON.stringify(cacheData), 'utf8')
  } catch (error) {
    console.error('Failed to write to filesystem cache:', error)
  }
}

/**
 * Delete cache data from filesystem
 */
async function deleteFromFilesystem(key) {
  const config = global.cacheHandlerConfig
  if (!config || !config.isFilesystemEnabled) return

  try {
    const filePath = getFilesystemPath(key)
    if (!filePath) return

    await fs.unlink(filePath)
  } catch (error) {
    // File doesn't exist, ignore
  }
}

/**
 * Read tag data from filesystem
 */
async function readTagFromFilesystem(tag) {
  const config = global.cacheHandlerConfig
  if (!config || !config.isFilesystemEnabled) return []

  try {
    const filePath = getTagPath(tag)
    if (!filePath) return []

    const data = await fs.readFile(filePath, 'utf8')
    return JSON.parse(data)
  } catch (error) {
    return []
  }
}

/**
 * Write tag data to filesystem
 */
async function writeTagToFilesystem(tag, keys) {
  const config = global.cacheHandlerConfig
  if (!config || !config.isFilesystemEnabled) return

  try {
    const filePath = getTagPath(tag)
    if (!filePath) return

    await fs.writeFile(filePath, JSON.stringify(keys), 'utf8')
  } catch (error) {
    console.error('Failed to write tag to filesystem cache:', error)
  }
}

/**
 * Delete tag data from filesystem
 */
async function deleteTagFromFilesystem(tag) {
  const config = global.cacheHandlerConfig
  if (!config || !config.isFilesystemEnabled) return

  try {
    const filePath = getTagPath(tag)
    if (!filePath) return

    await fs.unlink(filePath)
  } catch (error) {
    // File doesn't exist, ignore
  }
}

/**
 * Limit memory cache size to prevent memory leaks
 */
function limitMemoryCache() {
  const config = global.cacheHandlerConfig
  if (!config) return

  while (config.memoryCache.size > config.maxMemoryCacheSize) {
    const firstKey = config.memoryCache.keys().next().value
    if (firstKey) {
      config.memoryCache.delete(firstKey)
    } else {
      break
    }
  }
}

/**
 * Create Redis connection options based on environment configuration
 */
function createConnectionOptions() {
  // Check if any Redis configuration exists
  if (!process.env.REDIS_URL && !process.env.REDIS_SENTINEL_HOSTS) {
    return null
  }

  const keyPrefix = process.env.STORAGE_PREFIX ? `${process.env.STORAGE_PREFIX}:` : undefined

  // Check if Sentinel configuration is provided
  if (process.env.REDIS_SENTINEL_HOSTS && process.env.REDIS_SENTINEL_NAME) {
    return createSentinelOptions(keyPrefix)
  }

  // Fall back to URL-based connection
  const options = parseRedisUrl(process.env.REDIS_URL || 'redis://localhost:6379')

  return {
    ...options,
    keyPrefix,
    retryDelayOnFailover: 100,
    enableReadyCheck: false,
    maxRetriesPerRequest: 3,
    lazyConnect: true,
    connectTimeout: 10000,
    commandTimeout: 5000,
    reconnectOnError: (err) => {
      const targetError = 'READONLY'
      return err.message.includes(targetError)
    },
  }
}

/**
 * Create Sentinel connection options
 */
function createSentinelOptions(keyPrefix) {
  const sentinels = process.env.REDIS_SENTINEL_HOSTS.split(',').map((hostPort) => {
    const [host, port] = hostPort.trim().split(':')
    return {
      host: host || 'localhost',
      port: port ? Number.parseInt(port, 10) : 26379,
    }
  })

  return {
    sentinels,
    name: process.env.REDIS_SENTINEL_NAME,
    password: process.env.REDIS_PASSWORD,
    sentinelPassword: process.env.REDIS_SENTINEL_PASSWORD,
    keyPrefix,
    retryDelayOnFailover: 100,
    enableReadyCheck: false,
    maxRetriesPerRequest: 3,
    lazyConnect: true,
    connectTimeout: 10000,
    commandTimeout: 5000,
    sentinelRetryDelayOnFailover: 100,
    sentinelReconnectOnError: (err) => {
      const targetError = 'READONLY'
      return err.message.includes(targetError)
    },
    reconnectOnError: (err) => {
      const targetError = 'READONLY'
      return err.message.includes(targetError)
    },
  }
}

/**
 * Parse Redis URL into connection options
 * This is needed when we need to add specific options like maxRetriesPerRequest
 */
function parseRedisUrl(url) {
  try {
    const urlObj = new URL(url)
    return {
      host: urlObj.hostname,
      port: urlObj.port ? Number.parseInt(urlObj.port, 10) : 6379,
      password: urlObj.password || process.env.REDIS_PASSWORD || undefined,
      username: urlObj.username || undefined,
      db: urlObj.pathname ? Number.parseInt(urlObj.pathname.slice(1), 10) : 0,
    }
  } catch {
    // If URL parsing fails, assume it's a host:port format or just host
    const [host, port] = url.split(':')
    return {
      host: host || 'localhost',
      port: port ? Number.parseInt(port, 10) : 6379,
      password: process.env.REDIS_PASSWORD || undefined,
    }
  }
}

class CacheHandler {
  constructor(options) {
    // Initialize or get existing global config
    initializeCacheConfig(options)
  }

  async get(key) {
    const config = global.cacheHandlerConfig
    if (!config) return null

    try {
      // 1. Always check request cache first (fastest)
      const requestCached = config.requestCache.get(key)
      if (requestCached) {
        // Check if data is expired
        if (requestCached.expiresAt && Date.now() > requestCached.expiresAt) {
          config.requestCache.delete(key)
        } else {
          return requestCached
        }
      }

      // 2. If Redis is available, use Redis (distributed environment)
      if (config.isRedisConnected && config.client) {
        try {
          const cached = await config.client.get(`nextjs:cache:${key}`)
          if (cached) {
            const data = JSON.parse(cached)
            // Store in request cache for this request
            config.requestCache.set(key, data)
            return data
          }
        } catch (redisError) {
          console.error('Redis get error:', redisError)
          // Continue to memory/filesystem fallback
        }
      }

      // 3. If Redis is down, use memory cache first
      if (!config.isRedisConnected) {
        const memoryCached = config.memoryCache.get(key)
        if (memoryCached) {
          // Check if data is expired
          if (memoryCached.expiresAt && Date.now() > memoryCached.expiresAt) {
            config.memoryCache.delete(key)
          } else {
            // Store in request cache for this request
            config.requestCache.set(key, memoryCached)
            return memoryCached
          }
        }

        // 4. If memory cache miss and Redis is down, try filesystem
        const filesystemData = await readFromFilesystem(key)
        if (filesystemData) {
          // Store in memory and request cache
          config.memoryCache.set(key, filesystemData)
          config.requestCache.set(key, filesystemData)
          limitMemoryCache()
          return filesystemData
        }
      }

      return null
    } catch (error) {
      console.error('Cache get error:', error)
      return null
    }
  }

  async set(key, data, ctx) {
    const config = global.cacheHandlerConfig
    if (!config) return

    const now = Date.now()
    const ttlSeconds = ctx?.revalidate || config.defaultTTL

    const cacheData = {
      value: data,
      lastModified: now,
      expiresAt: now + ttlSeconds * 1000,
      tags: ctx?.tags || [],
    }

    try {
      // 1. Always store in request cache (fastest for current request)
      config.requestCache.set(key, cacheData)

      // 2. If Redis is available, store in Redis (distributed environment)
      if (config.isRedisConnected && config.client) {
        try {
          await config.client.setex(`nextjs:cache:${key}`, ttlSeconds, JSON.stringify(cacheData))

          // Store tag mappings for invalidation
          if (ctx?.tags && ctx.tags.length > 0) {
            const tagPromises = ctx.tags.map(async (tag) => {
              try {
                await config.client.sadd(`nextjs:tags:${tag}`, key)
                await config.client.expire(`nextjs:tags:${tag}`, ttlSeconds)
              } catch (tagError) {
                console.error(`Failed to store tag ${tag}:`, tagError)
              }
            })
            await Promise.allSettled(tagPromises)
          }
        } catch (redisError) {
          console.error('Redis set error:', redisError)
          // Continue to memory/filesystem fallback
        }
      }

      // 3. If Redis is down, use memory + filesystem strategy
      if (!config.isRedisConnected) {
        // Store in memory cache
        config.memoryCache.set(key, cacheData)
        limitMemoryCache()

        // Store in filesystem as persistent fallback
        await writeToFilesystem(key, cacheData, ttlSeconds)

        // Store tag mappings in filesystem
        if (ctx?.tags && ctx.tags.length > 0) {
          for (const tag of ctx.tags) {
            try {
              const existingKeys = await readTagFromFilesystem(tag)
              const updatedKeys = [...new Set([...existingKeys, key])]
              await writeTagToFilesystem(tag, updatedKeys)
            } catch (tagError) {
              console.error(`Failed to store filesystem tag ${tag}:`, tagError)
            }
          }
        }
      }
    } catch (error) {
      console.error('Cache set error:', error)
      // Even if everything fails, we still have request cache
    }
  }

  async revalidateTag(tags) {
    const config = global.cacheHandlerConfig
    if (!config) return 0

    const tagsArray = Array.isArray(tags) ? tags : [tags]

    try {
      const allKeysToInvalidate = new Set()

      // 1. If Redis is available, handle Redis tag invalidation (distributed)
      if (config.isRedisConnected && config.client) {
        try {
          for (const tag of tagsArray) {
            const keys = await config.client.smembers(`nextjs:tags:${tag}`)
            for (const key of keys) {
              allKeysToInvalidate.add(key)
            }

            if (keys.length > 0) {
              // Delete all cache entries with this tag
              const deletePromises = keys.map((key) => config.client.del(`nextjs:cache:${key}`))
              await Promise.allSettled(deletePromises)
            }

            // Clean up the tag set
            await config.client.del(`nextjs:tags:${tag}`)
          }
        } catch (redisError) {
          console.error('Redis revalidateTag error:', redisError)
        }
      }

      // 2. If Redis is down, handle local cache invalidation (memory + filesystem)
      if (!config.isRedisConnected) {
        // Handle filesystem tag invalidation
        for (const tag of tagsArray) {
          try {
            const keys = await readTagFromFilesystem(tag)
            for (const key of keys) {
              allKeysToInvalidate.add(key)
              await deleteFromFilesystem(key)
            }
            await deleteTagFromFilesystem(tag)
          } catch (filesystemError) {
            console.error('Filesystem revalidateTag error:', filesystemError)
          }
        }

        // Handle memory cache tag invalidation
        for (const [key, value] of config.memoryCache) {
          if (value.tags?.some((tag) => tagsArray.includes(tag))) {
            allKeysToInvalidate.add(key)
            config.memoryCache.delete(key)
          }
        }
      }

      // 3. Always handle request cache tag invalidation (both scenarios)
      for (const [key, value] of config.requestCache) {
        if (value.tags?.some((tag) => tagsArray.includes(tag))) {
          allKeysToInvalidate.add(key)
          config.requestCache.delete(key)
        }
      }

      return allKeysToInvalidate.size
    } catch (error) {
      console.error('Cache revalidateTag error:', error)

      // Fallback: clear local caches only
      for (const [key, value] of config.memoryCache) {
        if (value.tags?.some((tag) => tagsArray.includes(tag))) {
          config.memoryCache.delete(key)
        }
      }
      for (const [key, value] of config.requestCache) {
        if (value.tags?.some((tag) => tagsArray.includes(tag))) {
          config.requestCache.delete(key)
        }
      }

      return 0
    }
  }

  resetRequestCache() {
    const config = global.cacheHandlerConfig
    if (!config) return

    // Clear the request cache to prevent memory leaks between requests
    config.requestCache.clear()

    // Optionally, we can also do some cleanup of the memory cache if it's getting too large
    if (config.memoryCache.size > config.maxMemoryCacheSize * 1.5) {
      limitMemoryCache()
    }
  }
}

export default CacheHandler
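For context, Next.js (14+) lets an application plug a handler like this into the incremental cache via the cacheHandler option in next.config.js. The sketch below is an assumption about how a generated project might wire it up; the './cache-handler.js' path is hypothetical, and the environment variables mentioned in the comments are simply the ones the handler itself reads (REDIS_URL or REDIS_SENTINEL_HOSTS/REDIS_SENTINEL_NAME, REDIS_PASSWORD, STORAGE_PREFIX).

// next.config.js — minimal sketch, assuming the handler above is saved as ./cache-handler.js.
// If no Redis environment variables are set, the handler silently falls back to its
// in-memory + filesystem caches, so this config works in local development as well.
/** @type {import('next').NextConfig} */
module.exports = {
  // Route ISR/data cache reads and writes through the custom handler
  cacheHandler: require.resolve('./cache-handler.js'),
  // Disable Next.js' built-in in-memory cache so the custom handler is the single cache layer
  cacheMaxMemorySize: 0,
}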