fortify2-js
Version: 4.1.8
MOST POWERFUL JavaScript Security Library! Military-grade cryptography + 19 enhanced object methods + quantum-resistant algorithms + perfect TypeScript support. More powerful than Lodash with built-in security.
407 lines (403 loc) • 14.4 kB
JavaScript
;
var SecureCacheAdapter = require('./SecureCacheAdapter.js');
/**
* FortifyJS Cache Factory
* @version 4.1.8
* Factory for creating optimized cache instances based on configuration.
* Automatically selects the best strategy for ultra-fast performance.
*/
/**
 * Cache factory with intelligent strategy selection.
 *
 * Builds `SecureCacheAdapter` instances from a flat, legacy-style
 * configuration, auto-selecting a "memory", "redis", or "hybrid" strategy
 * and (by default) memoizing instances so equivalent configs share one
 * adapter.
 */
class CacheFactory {
    /**
     * Merge `config` into the global defaults applied by every `create()`.
     * @param {object} config - Partial default configuration.
     */
    static setDefaults(config) {
        this.defaultConfig = { ...this.defaultConfig, ...config };
    }
    /**
     * Create (or reuse) a cache instance for the given configuration.
     *
     * Unless `config.singleton === false`, instances are memoized under a
     * fingerprint of the config (see `generateInstanceId`), so repeated
     * calls with an equivalent config return the same adapter.
     * @param {object} [config={}] - Legacy-style cache configuration.
     * @returns {object} The `SecureCacheAdapter` instance.
     */
    static create(config = {}) {
        const mergedConfig = { ...this.defaultConfig, ...config };
        const useSingleton = mergedConfig.singleton !== false;
        const instanceId = this.generateInstanceId(mergedConfig);
        if (useSingleton && this.instances.has(instanceId)) {
            return this.instances.get(instanceId);
        }
        // Convert legacy config to the structured secure config format.
        const secureConfig = this.buildSecureConfig(mergedConfig);
        const instance = new SecureCacheAdapter.SecureCacheAdapter(secureConfig);
        if (useSingleton) {
            this.instances.set(instanceId, instance);
        }
        return instance;
    }
    /**
     * Translate a flat legacy config into the structured config consumed by
     * `SecureCacheAdapter`.
     *
     * Numeric defaults use `??` instead of `||` so explicit zeros (e.g.
     * `ttl: 0`, `errorRate: 0`) are honored rather than silently replaced
     * by the defaults.
     * @param {object} config - Legacy configuration.
     * @returns {object} Structured secure configuration.
     */
    static buildSecureConfig(config) {
        return {
            strategy: this.determineStrategy(config),
            memory: {
                maxSize: config.maxSize ?? 100, // MB
                maxEntries: config.maxEntries ?? 10000,
                ttl: (config.ttl ?? 300) * 1000, // Convert seconds to milliseconds
                algorithm: config.memory?.algorithm ?? "lru",
                evictionPolicy: config.memory?.evictionPolicy ?? "lru",
                preallocation: config.memory?.preallocation ?? false,
            },
            redis: this.buildRedisConfig(config),
            performance: {
                batchSize: config.performance?.batchSize ?? 100,
                // Infinity threshold means compression is effectively disabled.
                compressionThreshold: config.compression ? 1024 : Infinity,
                hotDataThreshold: config.performance?.hotDataThreshold ?? 10,
                prefetchEnabled: config.performance?.prefetchEnabled !== false,
                asyncWrite: config.performance?.asyncWrite !== false,
                pipeline: config.performance?.pipeline !== false,
                connectionPooling: config.performance?.connectionPooling !== false,
            },
            security: {
                encryption: config.encryption !== false,
                keyRotation: config.security?.keyRotation !== false,
                accessMonitoring: config.security?.accessMonitoring !== false,
                sanitization: config.security?.sanitization !== false,
                auditLogging: config.security?.auditLogging ?? false,
            },
            monitoring: {
                enabled: config.monitoring?.enabled !== false,
                metricsInterval: config.monitoring?.metricsInterval ?? 60000,
                alertThresholds: {
                    memoryUsage: config.monitoring?.alertThresholds?.memoryUsage ?? 90,
                    hitRate: config.monitoring?.alertThresholds?.hitRate ?? 80,
                    errorRate: config.monitoring?.alertThresholds?.errorRate ?? 5,
                    latency: config.monitoring?.alertThresholds?.latency ?? 100,
                },
                detailed: config.monitoring?.detailed ?? false,
            },
            resilience: {
                retryAttempts: config.resilience?.retryAttempts ?? 3,
                retryDelay: config.resilience?.retryDelay ?? 1000,
                circuitBreaker: config.resilience?.circuitBreaker !== false,
                fallback: config.resilience?.fallback !== false,
                healthCheck: config.resilience?.healthCheck !== false,
            },
        };
    }
    /**
     * Determine the optimal cache strategy.
     *
     * An explicit `config.type` of "memory"/"redis"/"hybrid" wins; otherwise
     * auto-detect: use Redis when a Redis config or REDIS_URL/REDIS_HOST env
     * var is present, preferring "hybrid" unless the memory budget is tight
     * (< 50 MB).
     * @param {object} config - Legacy configuration.
     * @returns {"memory"|"redis"|"hybrid"} Selected strategy.
     */
    static determineStrategy(config) {
        if (config.type === "redis" ||
            config.type === "memory" ||
            config.type === "hybrid") {
            return config.type;
        }
        // Auto-detection logic
        const hasRedis = config.redis || process.env.REDIS_URL || process.env.REDIS_HOST;
        const memoryLimit = config.maxSize ?? 100;
        const isMemoryConstrained = memoryLimit < 50;
        if (hasRedis) {
            return isMemoryConstrained ? "redis" : "hybrid";
        }
        return "memory";
    }
    /**
     * Build the Redis section of the secure config, falling back to REDIS_*
     * environment variables. Returns `undefined` when no Redis configuration
     * is available at all.
     * @param {object} config - Legacy configuration.
     * @returns {object|undefined} Redis configuration, if any.
     */
    static buildRedisConfig(config) {
        if (!config.redis &&
            !process.env.REDIS_URL &&
            !process.env.REDIS_HOST) {
            return undefined;
        }
        const redisConfig = config.redis || {};
        return {
            host: redisConfig.host ?? process.env.REDIS_HOST ?? "localhost",
            // `??` keeps explicit zeros (notably `db: 0`, a valid Redis DB
            // index); radix 10 is passed explicitly to parseInt.
            port: redisConfig.port ?? Number.parseInt(process.env.REDIS_PORT || "6379", 10),
            password: redisConfig.password ?? process.env.REDIS_PASSWORD,
            db: redisConfig.db ?? Number.parseInt(process.env.REDIS_DB || "0", 10),
            cluster: redisConfig.cluster
                ? {
                    enabled: redisConfig.cluster,
                    nodes: redisConfig.nodes ?? [
                        { host: "localhost", port: 6379 },
                    ],
                }
                : undefined,
            pool: {
                min: redisConfig.pool?.min ?? 2, // `??` allows an explicit min of 0
                max: redisConfig.pool?.max ?? 10,
                acquireTimeoutMillis: redisConfig.pool?.acquireTimeoutMillis ?? 30000,
                idleTimeoutMillis: redisConfig.pool?.idleTimeoutMillis ?? 30000,
            },
            sentinel: redisConfig.sentinel
                ? {
                    enabled: true,
                    masters: redisConfig.sentinel.masters ?? ["mymaster"],
                    sentinels: redisConfig.sentinel.sentinels ?? [
                        { host: "localhost", port: 26379 },
                    ],
                }
                : undefined,
        };
    }
    /**
     * Generate a deterministic instance ID for the singleton registry.
     * NOTE: only type/redis/maxSize/ttl participate in the fingerprint, so
     * configs that differ in other fields map to the same instance.
     * @param {object} config - Legacy configuration.
     * @returns {string} Base64-encoded fingerprint.
     */
    static generateInstanceId(config) {
        const key = JSON.stringify({
            type: config.type,
            redis: config.redis,
            maxSize: config.maxSize,
            ttl: config.ttl,
        });
        return Buffer.from(key).toString("base64");
    }
    /**
     * Create a memory-only cache for maximum speed.
     * @param {object} [config={}] - Overrides for the memory cache.
     * @returns {object} The cache instance.
     */
    static createMemoryCache(config = {}) {
        return this.create({
            type: "memory",
            maxSize: config.maxSize ?? 100,
            maxEntries: config.maxEntries ?? 10000,
            ttl: config.ttl ?? 300,
            compression: config.compression ?? false,
            encryption: config.encryption !== false,
            memory: {
                algorithm: config.memory?.algorithm ?? "lru",
                preallocation: config.memory?.preallocation ?? false,
                ...config.memory,
            },
        });
    }
    /**
     * Create a Redis-only cache for persistence.
     * @param {object} [config={}] - Overrides for the Redis cache.
     * @returns {object} The cache instance.
     */
    static createRedisCache(config = {}) {
        return this.create({
            type: "redis",
            redis: config.redis || {
                host: "localhost",
                port: 6379,
            },
            ttl: config.ttl ?? 300,
            compression: config.compression !== false,
            encryption: config.encryption !== false,
        });
    }
    /**
     * Create a hybrid (memory + Redis) cache for the best of both worlds.
     * @param {object} [config={}] - Overrides for the hybrid cache.
     * @returns {object} The cache instance.
     */
    static createHybridCache(config = {}) {
        return this.create({
            type: "hybrid",
            memory: {
                maxSize: config.maxSize ?? 100,
                algorithm: config.memory?.algorithm ?? "lru",
                ...config.memory,
            },
            redis: config.redis || {
                host: "localhost",
                port: 6379,
            },
            compression: config.compression !== false,
            encryption: config.encryption !== false,
        });
    }
    /**
     * Create a Redis Cluster cache for high availability.
     * @param {Array<{host: string, port: number}>} nodes - Cluster nodes.
     * @param {object} [config={}] - Additional overrides.
     * @returns {object} The cache instance.
     */
    static createClusterCache(nodes, config = {}) {
        return this.create({
            type: "redis",
            redis: {
                ...config.redis,
                cluster: true,
                nodes,
            },
            ttl: config.ttl ?? 300,
            compression: config.compression !== false,
            encryption: config.encryption !== false,
        });
    }
    /**
     * Create a Redis Sentinel cache for high availability.
     * @param {string[]} masters - Sentinel master names.
     * @param {Array<{host: string, port: number}>} sentinels - Sentinel nodes.
     * @param {object} [config={}] - Additional overrides.
     * @returns {object} The cache instance.
     */
    static createSentinelCache(masters, sentinels, config = {}) {
        return this.create({
            type: "redis",
            redis: {
                ...config.redis,
                sentinel: {
                    enabled: true,
                    masters,
                    sentinels,
                },
            },
            ttl: config.ttl ?? 300,
            compression: config.compression !== false,
            encryption: config.encryption !== false,
        });
    }
    /**
     * Create a distributed cache with sharding.
     * @param {Array<object>} shards - Shard descriptors.
     * @param {object} [config={}] - Additional overrides.
     * @returns {object} The cache instance.
     */
    static createDistributedCache(shards, config = {}) {
        return this.create({
            type: "distributed",
            redis: {
                ...config.redis,
                shards,
            },
            ttl: config.ttl ?? 300,
            compression: config.compression !== false,
            encryption: config.encryption !== false,
        });
    }
    /**
     * Disconnect and forget all memoized cache instances.
     * Disconnect failures are logged, not thrown (best-effort cleanup).
     */
    static clearInstances() {
        this.instances.forEach((instance) => {
            instance.disconnect().catch(console.error);
        });
        this.instances.clear();
    }
    /**
     * Look up an existing memoized instance for a configuration.
     * @param {object} config - Legacy configuration.
     * @returns {object|null} The instance, or null if none exists.
     */
    static getInstance(config) {
        const instanceId = this.generateInstanceId(config);
        return this.instances.get(instanceId) || null;
    }
}
// Memoized instances keyed by generateInstanceId().
CacheFactory.instances = new Map();
// Global defaults merged into every create() call (see setDefaults).
CacheFactory.defaultConfig = {};
/**
 * Auto-detect the best cache strategy for the current environment and
 * create a matching cache instance.
 *
 * Delegates strategy selection to `CacheFactory.determineStrategy` so the
 * detection logic (explicit type, then Redis env/config probing) lives in
 * exactly one place instead of being duplicated here.
 * @param {object} [config={}] - Legacy-style cache configuration.
 * @returns {object} The cache instance for the selected strategy.
 */
function createOptimalCache(config = {}) {
    const strategy = CacheFactory.determineStrategy(config);
    switch (strategy) {
        case "hybrid":
            return CacheFactory.createHybridCache(config);
        case "redis":
            return CacheFactory.createRedisCache(config);
        case "memory":
        default:
            return CacheFactory.createMemoryCache(config);
    }
}
/**
 * Utility helpers that operate on any cache exposing the async
 * get/set/delete/keys/getTTL interface.
 */
const CacheUtils = {
    /**
     * Benchmark write, read, and delete throughput of a cache.
     * @param {object} cache - Cache instance to exercise.
     * @param {number} [operations=1000] - Operations per phase.
     * @returns {Promise<{writeTime: number, readTime: number, deleteTime: number, throughput: number}>}
     *          Per-phase durations (ms) and overall ops/second.
     */
    async benchmark(cache, operations = 1000) {
        const start = Date.now();
        // Write phase
        const writeStart = Date.now();
        for (let i = 0; i < operations; i++) {
            await cache.set(`bench:${i}`, {
                data: `value${i}`,
                timestamp: Date.now(),
            });
        }
        const writeTime = Date.now() - writeStart;
        // Read phase
        const readStart = Date.now();
        for (let i = 0; i < operations; i++) {
            await cache.get(`bench:${i}`);
        }
        const readTime = Date.now() - readStart;
        // Delete phase (also cleans up the benchmark keys)
        const deleteStart = Date.now();
        for (let i = 0; i < operations; i++) {
            await cache.delete(`bench:${i}`);
        }
        const deleteTime = Date.now() - deleteStart;
        // Clamp to >= 1 ms so very fast runs don't report Infinity ops/sec.
        const totalTime = Math.max(Date.now() - start, 1);
        const throughput = (operations * 3) / (totalTime / 1000); // ops per second
        return {
            writeTime,
            readTime,
            deleteTime,
            throughput,
        };
    },
    /**
     * Warm up a cache by writing entries in parallel batches of 100.
     * @param {object} cache - Target cache.
     * @param {Array<{key: string, value: *, ttl?: number}>} data - Entries to load.
     * @returns {Promise<void>}
     */
    async warmUp(cache, data) {
        const batches = [];
        const batchSize = 100;
        for (let i = 0; i < data.length; i += batchSize) {
            batches.push(data.slice(i, i + batchSize));
        }
        for (const batch of batches) {
            await Promise.all(batch.map((item) => cache.set(item.key, item.value, { ttl: item.ttl })));
        }
    },
    /**
     * Migrate entries from one cache to another.
     * @param {object} source - Cache to read from (must support keys()/get()/getTTL()).
     * @param {object} target - Cache to write to.
     * @param {object} [options={}] - Migration options.
     * @param {number} [options.batchSize=100] - Keys migrated concurrently per batch.
     * @param {boolean} [options.preserveTTL=true] - Carry each key's TTL across.
     * @param {function(string): boolean} [options.keyFilter] - Keep only matching keys.
     * @returns {Promise<{migrated: number, failed: number}>} Outcome counts.
     */
    async migrate(source, target, options = {}) {
        const { batchSize = 100, preserveTTL = true, keyFilter } = options;
        const keys = await source.keys();
        const filteredKeys = keyFilter ? keys.filter(keyFilter) : keys;
        let migrated = 0;
        let failed = 0;
        for (let i = 0; i < filteredKeys.length; i += batchSize) {
            const batch = filteredKeys.slice(i, i + batchSize);
            // Migrate each batch concurrently; previously the inner loop was
            // strictly sequential, so batchSize had no effect on throughput.
            await Promise.all(batch.map(async (key) => {
                try {
                    const value = await source.get(key);
                    if (value !== null) {
                        const ttl = preserveTTL
                            ? await source.getTTL(key)
                            : undefined;
                        await target.set(key, value, { ttl });
                        migrated++;
                    }
                }
                catch (error) {
                    // Log and keep going: one bad key must not abort the migration.
                    console.error(`Failed to migrate key ${key}:`, error);
                    failed++;
                }
            }));
        }
        return { migrated, failed };
    },
};
// Public API (CommonJS exports).
exports.CacheFactory = CacheFactory;
exports.CacheUtils = CacheUtils;
exports.createOptimalCache = createOptimalCache;
//# sourceMappingURL=CacheFactory.js.map