/*
 * @yihuangdb/storage-object
 * A Node.js storage object layer library using Redis OM.
 * Compiled JavaScript output (schema-registry module).
 */
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.SchemaRegistry = void 0;
exports.getSchemaRegistry = getSchemaRegistry;
const storage_1 = require("./storage");
const redis_1 = require("redis");
const performance_profiler_1 = require("./performance-profiler");
const redis_key_manager_1 = require("./redis-key-manager");
const schema_versioning_1 = require("./schema-versioning");
class SchemaRegistry {
static instance; // process-wide singleton (see getInstance / reset)
static REGISTRY_KEY; // Will be set using RedisKeyManager
registeredSchemas = new Map(); // schema name -> metadata record
storageInstances = new Map(); // schema name -> cached StorageObject instance
versionManagers = new Map(); // schema name -> SchemaVersionManager
client = null; // dedicated Redis client for metadata persistence (null until connected)
options; // normalized constructor options
metricsInterval = null; // setInterval handle for periodic metrics collection
registryKey; // Redis hash key where schema metadata is persisted
keyManager = (0, redis_key_manager_1.getRedisKeyManager)();
initialized = false; // true once doInitialize() has completed successfully
initializationPromise = null; // in-flight initialization, coalesces concurrent callers
compiledSchemas = new Map(); // Cache compiled schemas
constructor(options = {}) {
this.options = {
registryPrefix: options.registryPrefix || 'schema-registry',
autoTrack: options.autoTrack !== false,
persistMetadata: options.persistMetadata !== false,
metadataRefreshInterval: options.metadataRefreshInterval || 60000, // 1 minute
...options,
};
// Use RedisKeyManager for consistent key generation
this.registryKey = this.keyManager.getRegistrySchemasKey();
SchemaRegistry.REGISTRY_KEY = this.registryKey;
}
static getInstance(options) {
if (!SchemaRegistry.instance) {
SchemaRegistry.instance = new SchemaRegistry(options);
// Start initialization but don't await here
SchemaRegistry.instance.initialize().catch(error => {
console.error('Failed to initialize SchemaRegistry:', error);
});
}
return SchemaRegistry.instance;
}
/**
* Reset the singleton instance (for testing or reinitialization)
*/
static async reset() {
if (SchemaRegistry.instance) {
await SchemaRegistry.instance.shutdown();
SchemaRegistry.instance = null;
}
}
/**
* Check if the registry is healthy
* @returns true if registry is operating normally
*/
async isHealthy() {
try {
if (!this.initialized) {
await this.initialize();
}
if (!this.client || !this.client.isReady) {
return false;
}
const pong = await this.client.ping();
return pong === 'PONG';
}
catch (error) {
return false;
}
}
/**
* Get registry statistics
* @returns Registry statistics including schema count, total operations, etc.
*/
async getStats() {
if (!this.initialized) {
await this.initialize();
}
const all = await this.getAllSchemas();
let totalOperations = 0;
let activeSchemas = 0;
let deprecatedSchemas = 0;
for (const metadata of all) {
if (metadata.statistics) {
totalOperations += metadata.statistics.totalOperations || 0;
}
if (metadata.status === 'active')
activeSchemas++;
if (metadata.status === 'deprecated')
deprecatedSchemas++;
}
return {
schemaCount: all.length,
totalOperations,
activeSchemas,
deprecatedSchemas,
};
}
async initialize() {
if (this.initialized)
return;
if (this.initializationPromise)
return this.initializationPromise;
this.initializationPromise = this.doInitialize();
try {
await this.initializationPromise;
this.initialized = true;
}
finally {
this.initializationPromise = null;
}
}
async doInitialize() {
// Run initialization tasks in parallel where possible
const tasks = [];
if (this.options.persistMetadata) {
tasks.push(this.initializeRedisClient());
}
await Promise.all(tasks);
if (this.options.autoTrack) {
this.startMetricsCollection();
}
}
async initializeRedisClient() {
if (this.client?.isOpen)
return; // Already connected
const connectionOptions = {};
if (this.options.redisUrl) {
connectionOptions.url = this.options.redisUrl;
}
else {
connectionOptions.socket = {
host: this.options.redisHost || 'localhost',
port: this.options.redisPort || 6379,
};
}
this.client = (0, redis_1.createClient)(connectionOptions);
this.client.on('error', (err) => {
console.error('Schema Registry Redis Error:', err);
// Mark as not initialized on error
this.initialized = false;
});
try {
await this.client.connect();
await this.loadPersistedSchemas();
}
catch (error) {
this.client = null;
throw error;
}
}
/**
 * Load persisted schema metadata from the Redis registry hash into memory,
 * reviving serialized Date fields. Individual parse failures are logged and
 * skipped; the whole load is best-effort and never throws.
 */
async loadPersistedSchemas() {
if (!this.client)
return;
return performance_profiler_1.profiler.measure('registry.loadPersistedSchemas', async () => {
try {
const schemas = await this.client.hGetAll(this.registryKey);
for (const [name, metadataJson] of Object.entries(schemas)) {
try {
const metadata = JSON.parse(metadataJson);
// Convert date strings back to Date objects
metadata.createdAt = new Date(metadata.createdAt);
metadata.updatedAt = new Date(metadata.updatedAt);
metadata.lastAccessedAt = new Date(metadata.lastAccessedAt);
if (metadata.performance?.slowestOperation?.timestamp) {
metadata.performance.slowestOperation.timestamp = new Date(metadata.performance.slowestOperation.timestamp);
}
// Overwrites any in-memory entry of the same name with the persisted copy.
this.registeredSchemas.set(name, metadata);
}
catch (error) {
// One corrupt entry must not abort loading the rest.
console.error(`Failed to parse schema metadata for ${name}:`, error);
}
}
}
catch (error) {
console.error('Failed to load persisted schemas:', error);
}
}, { schemaCount: this.registeredSchemas.size });
}
/**
 * Register a schema (create new metadata) or update an existing entry.
 * Field configs may be a bare type string or a config object; valid types
 * are string, number, boolean, date, text, string[]. When a Redis client is
 * available, a SchemaVersionManager is created/synced for the schema, and
 * the metadata is persisted when persistMetadata is enabled.
 * @param {string} name - unique schema name
 * @param {object} schema - field-name -> field-config map
 * @param {object} [options] - storage options (prefix, useJSON, ...)
 * @returns {Promise<object>} the schema's metadata record
 * @throws {Error} on invalid name, non-object schema, or invalid field type
 */
async register(name, schema, options = {}) {
return performance_profiler_1.profiler.measure(`registry.register.${name}`, async () => {
// Ensure initialized
await this.initialize();
// Validate inputs
if (!name || typeof name !== 'string') {
throw new Error('Schema name must be a non-empty string');
}
if (!schema || typeof schema !== 'object') {
throw new Error('Schema must be a valid object');
}
// Validate schema fields
for (const [fieldName, fieldConfig] of Object.entries(schema)) {
if (!fieldConfig) {
throw new Error(`Field '${fieldName}' has invalid configuration`);
}
// A bare string is shorthand for { type: <string> }.
const config = typeof fieldConfig === 'string'
? { type: fieldConfig }
: fieldConfig;
// Validate field type
const validTypes = ['string', 'number', 'boolean', 'date', 'text', 'string[]'];
if (!validTypes.includes(config.type)) {
throw new Error(`Field '${fieldName}' has invalid type '${config.type}'. Valid types are: ${validTypes.join(', ')}`);
}
}
// Check if schema already exists
let metadata = this.registeredSchemas.get(name);
if (metadata) {
// Update existing schema (version is managed by SchemaVersionManager)
metadata.schema = schema;
metadata.options = options;
metadata.updatedAt = new Date();
metadata.attributes = this.analyzeSchema(schema);
}
else {
// Create new metadata
metadata = {
name,
version: 1,
schema,
prefix: options.prefix || 'storage',
options,
createdAt: new Date(),
updatedAt: new Date(),
lastAccessedAt: new Date(),
statistics: {
objectCount: 0,
createCount: 0,
readCount: 0,
updateCount: 0,
deleteCount: 0,
totalOperations: 0,
averageObjectSize: 0,
totalStorageSize: 0,
},
attributes: this.analyzeSchema(schema),
performance: {
averageCreateTime: 0,
averageReadTime: 0,
averageUpdateTime: 0,
averageDeleteTime: 0,
slowestOperation: {
type: 'none',
duration: 0,
timestamp: new Date(),
},
},
status: 'active',
};
}
this.registeredSchemas.set(name, metadata);
// Initialize version manager if not already present
if (!this.versionManagers.has(name) && this.client) {
const versionManager = new schema_versioning_1.SchemaVersionManager(name, this.client);
// Determine data structure (use JSON if available, otherwise HASH)
const dataStructure = options.useJSON !== false ? 'JSON' : 'HASH';
// Get indexed fields
const indexedFields = Object.entries(schema)
.filter(([_, config]) => {
const fieldConfig = typeof config === 'string' ? { indexed: false } : config;
return fieldConfig.indexed;
})
.map(([fieldName]) => fieldName);
// Initialize the version manager with schema information
await versionManager.initialize(schema, indexedFields, dataStructure);
this.versionManagers.set(name, versionManager);
// Sync version from version manager back to metadata
const versionMetadata = await versionManager.getMetadata();
if (versionMetadata) {
metadata.version = versionMetadata.version;
}
}
else if (this.versionManagers.has(name) && this.client) {
// Version manager already exists - only update if schema actually changed
const versionManager = this.versionManagers.get(name);
const existingMetadata = await versionManager.getMetadata();
if (existingMetadata) {
const dataStructure = options.useJSON !== false ? 'JSON' : 'HASH';
const indexedFields = Object.entries(schema)
.filter(([_, config]) => {
const fieldConfig = typeof config === 'string' ? { indexed: false } : config;
return fieldConfig.indexed;
})
.map(([fieldName]) => fieldName);
// Serialized forms are compared against the stored strings to detect
// structural changes cheaply.
const newFieldsStr = JSON.stringify(schema);
const newIndexesStr = JSON.stringify(indexedFields);
// Only re-initialize if there's an actual structural change
if (existingMetadata.fields !== newFieldsStr ||
existingMetadata.indexes !== newIndexesStr ||
existingMetadata.dataStructure !== dataStructure) {
await versionManager.initialize(schema, indexedFields, dataStructure);
}
// Always sync version from version manager back to metadata
metadata.version = existingMetadata.version;
}
}
// Persist to Redis if enabled
if (this.options.persistMetadata && this.client) {
await this.persistSchema(name, metadata);
}
return metadata;
}, { schemaName: name });
}
analyzeSchema(schema) {
const fields = {};
let fieldCount = 0;
let indexedFieldCount = 0;
for (const [fieldName, fieldConfig] of Object.entries(schema)) {
fieldCount++;
const config = typeof fieldConfig === 'string'
? { type: fieldConfig, indexed: false }
: fieldConfig;
if (config.indexed) {
indexedFieldCount++;
}
fields[fieldName] = {
type: config.type,
indexed: config.indexed || false,
nullCount: 0,
uniqueValues: 0,
};
}
return {
fieldCount,
indexedFieldCount,
fields,
};
}
async persistSchema(name, metadata) {
if (!this.client)
return;
try {
const metadataJson = JSON.stringify(metadata);
await this.client.hSet(this.registryKey, name, metadataJson);
}
catch (error) {
console.error(`Failed to persist schema ${name}:`, error);
}
}
/**
 * Get (or lazily create) the StorageObject instance for a registered schema.
 * Falls back to re-reading persisted metadata from Redis when the schema is
 * not in memory. Returns null when the schema is unknown.
 * The repeated cache re-checks guard against another concurrent caller
 * creating the instance while this one was awaiting.
 */
async getStorage(name) {
return performance_profiler_1.profiler.measure(`registry.getStorage.${name}`, async () => {
// Ensure initialized
await this.initialize();
// Check if we already have an instance
let storage = this.storageInstances.get(name);
if (storage) {
this.updateLastAccessed(name);
return storage;
}
// Check if schema is registered
const metadata = this.registeredSchemas.get(name);
if (!metadata) {
// Try to load from Redis if not in memory
if (this.client?.isOpen) {
await this.loadPersistedSchemas();
const loadedMetadata = this.registeredSchemas.get(name);
if (loadedMetadata) {
// Avoid race condition: check again if instance was created
const existingStorage = this.storageInstances.get(name);
if (existingStorage) {
return existingStorage;
}
return this.createStorageInstance(name, loadedMetadata);
}
}
// Unknown schema: nothing to return.
return null;
}
// Avoid race condition: check again if instance was created while we were checking
const existingStorage = this.storageInstances.get(name);
if (existingStorage) {
return existingStorage;
}
return this.createStorageInstance(name, metadata);
}, { schemaName: name });
}
async registerStorageInstance(name, storage) {
// Wrap storage methods to track statistics
this.wrapStorageMethods(storage, name);
// Store the instance
this.storageInstances.set(name, storage);
this.updateLastAccessed(name);
}
creatingInstances = new Map();
async createStorageInstance(name, metadata) {
// Double-check to avoid race condition
const existingStorage = this.storageInstances.get(name);
if (existingStorage) {
return existingStorage;
}
// Check if another request is already creating this instance
const pendingCreation = this.creatingInstances.get(name);
if (pendingCreation) {
return pendingCreation;
}
// Create a promise for this creation attempt
const creationPromise = this.doCreateStorageInstance(name, metadata)
.catch(error => {
// Ensure cleanup happens even on error
this.creatingInstances.delete(name);
throw error;
});
this.creatingInstances.set(name, creationPromise);
try {
const result = await creationPromise;
return result;
}
finally {
// Clean up the pending creation
this.creatingInstances.delete(name);
}
}
/**
 * Actually construct and initialize a StorageObject for `name`.
 * Tolerates the "Index already exists" error raised when a concurrent
 * caller created the index first; any duplicate instance detected during
 * the race is disconnected and the cached winner is returned instead.
 */
async doCreateStorageInstance(name, metadata) {
return performance_profiler_1.profiler.measure(`registry.createStorageInstance.${name}`, async () => {
try {
const storage = new storage_1.StorageObject(name, metadata.schema, metadata.options);
// Try to initialize, but catch index already exists error
try {
await storage.initialize();
}
catch (initError) {
// If index already exists, it means another concurrent operation created it
if (initError?.message?.includes('Index already exists')) {
// Check if another instance was created while we were trying
const checkAgain = this.storageInstances.get(name);
if (checkAgain) {
await storage.disconnect();
return checkAgain;
}
// Otherwise, continue with this instance (index exists is OK)
}
else {
// Re-throw other errors
await storage.disconnect();
throw initError;
}
}
// Wrap storage methods to track statistics
this.wrapStorageMethods(storage, name);
// Final check before setting to avoid race condition
const finalCheck = this.storageInstances.get(name);
if (finalCheck) {
// Another thread created it, use that one and clean up ours
await storage.disconnect();
return finalCheck;
}
this.storageInstances.set(name, storage);
this.updateLastAccessed(name);
return storage;
}
catch (error) {
// Only log non-index-exists errors
if (!error?.message?.includes('Index already exists')) {
console.error(`Failed to create storage instance for ${name}:`, error);
}
throw error;
}
}, { schemaName: name });
}
/**
 * Monkey-patch the instance's CRUD methods (create/findById/find/update/
 * delete) so every call feeds this schema's statistics and latency metrics.
 * Failed calls are recorded with failed=true and the error is rethrown.
 * create() results are additionally tracked by the schema's version manager
 * when one exists. No-op if the schema is not registered.
 */
wrapStorageMethods(storage, name) {
const metadata = this.registeredSchemas.get(name);
if (!metadata)
return;
// Wrap create method
const originalCreate = storage.create.bind(storage);
storage.create = async (data) => {
const startTime = Date.now();
try {
const result = await originalCreate(data);
this.updateStatistics(name, 'create', Date.now() - startTime);
// Track entity in version manager if available
const versionManager = this.versionManagers.get(name);
if (versionManager && result && result.entityId) {
await versionManager.trackEntity(result.entityId);
}
return result;
}
catch (error) {
// Still track failed operations
this.updateStatistics(name, 'create', Date.now() - startTime, true);
throw error;
}
};
// Wrap findById method (recorded as a 'read' operation)
const originalFindById = storage.findById.bind(storage);
storage.findById = async (id) => {
const startTime = Date.now();
try {
const result = await originalFindById(id);
this.updateStatistics(name, 'read', Date.now() - startTime);
return result;
}
catch (error) {
this.updateStatistics(name, 'read', Date.now() - startTime, true);
throw error;
}
};
// Wrap find method (also recorded as a 'read' operation)
const originalFind = storage.find.bind(storage);
storage.find = async (query, options) => {
const startTime = Date.now();
try {
const result = await originalFind(query, options);
this.updateStatistics(name, 'read', Date.now() - startTime);
return result;
}
catch (error) {
this.updateStatistics(name, 'read', Date.now() - startTime, true);
throw error;
}
};
// Wrap update method
const originalUpdate = storage.update.bind(storage);
storage.update = async (id, data, options) => {
const startTime = Date.now();
try {
const result = await originalUpdate(id, data, options);
this.updateStatistics(name, 'update', Date.now() - startTime);
return result;
}
catch (error) {
this.updateStatistics(name, 'update', Date.now() - startTime, true);
throw error;
}
};
// Wrap delete method
const originalDelete = storage.delete.bind(storage);
storage.delete = async (id) => {
const startTime = Date.now();
try {
const result = await originalDelete(id);
// Only count a delete when the call reports success (truthy result).
if (result) {
this.updateStatistics(name, 'delete', Date.now() - startTime);
}
return result;
}
catch (error) {
this.updateStatistics(name, 'delete', Date.now() - startTime, true);
throw error;
}
};
}
updateStatistics(name, operation, duration, failed = false) {
const metadata = this.registeredSchemas.get(name);
if (!metadata)
return;
// Update operation counts (don't increment for failed operations)
if (!failed) {
metadata.statistics[`${operation}Count`]++;
metadata.statistics.totalOperations++;
}
// Update performance metrics
const perfKey = `average${operation.charAt(0).toUpperCase() + operation.slice(1)}Time`;
const currentAvg = metadata.performance[perfKey];
const count = metadata.statistics[`${operation}Count`];
metadata.performance[perfKey] = ((currentAvg * (count - 1)) + duration) / count;
// Track slowest operation
if (duration > metadata.performance.slowestOperation.duration) {
metadata.performance.slowestOperation = {
type: operation,
duration,
timestamp: new Date(),
};
}
metadata.lastAccessedAt = new Date();
// Persist updates periodically
if (this.options.persistMetadata && metadata.statistics.totalOperations % 100 === 0) {
this.persistSchema(name, metadata).catch(console.error);
}
}
updateLastAccessed(name) {
const metadata = this.registeredSchemas.get(name);
if (metadata) {
metadata.lastAccessedAt = new Date();
}
}
async getAllSchemas() {
await this.initialize();
return Array.from(this.registeredSchemas.values());
}
async getSchemaMetadata(name) {
await this.initialize();
const metadata = this.registeredSchemas.get(name);
if (metadata) {
this.updateLastAccessed(name);
}
return metadata || null;
}
async updateObjectCount(name) {
await this.initialize();
const metadata = this.registeredSchemas.get(name);
if (!metadata)
return;
// Only try to get actual count if storage instance already exists
const existingStorage = this.storageInstances.get(name);
if (!existingStorage) {
// No storage instance yet, just update metadata timestamp
metadata.lastAccessedAt = new Date();
return;
}
try {
const count = await existingStorage.count();
metadata.statistics.objectCount = count;
// Update storage size estimate
if (count > 0) {
// Estimate average object size (this is a rough estimate)
const sampleSize = Math.min(10, count);
const samples = await existingStorage.find({}, { limit: sampleSize });
let totalSize = 0;
for (const sample of samples) {
totalSize += JSON.stringify(sample).length;
}
metadata.statistics.averageObjectSize = totalSize / sampleSize;
metadata.statistics.totalStorageSize = metadata.statistics.averageObjectSize * count;
}
metadata.lastAccessedAt = new Date();
if (this.options.persistMetadata) {
await this.persistSchema(name, metadata);
}
}
catch (error) {
// Only log if it's not an expected error
if (!error?.message?.includes('No such index')) {
console.error(`Failed to update object count for ${name}:`, error);
}
}
}
startMetricsCollection() {
if (this.metricsInterval)
return;
this.metricsInterval = setInterval(async () => {
// Only update schemas that have storage instances
for (const name of this.storageInstances.keys()) {
await this.updateObjectCount(name);
}
}, this.options.metadataRefreshInterval);
}
async backup(options = {}) {
return performance_profiler_1.profiler.measure('registry.backup', async () => {
await this.initialize();
const backup = {
version: '1.0.0',
timestamp: new Date(),
schemas: {},
data: {},
};
// Backup metadata
if (options.includeMetadata !== false) {
for (const [name, metadata] of this.registeredSchemas.entries()) {
backup.schemas[name] = metadata;
}
}
// Backup data if requested
if (options.includeData) {
for (const name of this.registeredSchemas.keys()) {
const storage = await this.getStorage(name);
if (storage) {
try {
const allData = await storage.findAll();
backup.data[name] = allData;
}
catch (error) {
console.error(`Failed to backup data for ${name}:`, error);
}
}
}
}
const backupJson = JSON.stringify(backup, null, 2);
if (options.compress) {
// In a real implementation, we would compress the JSON
// For now, just return the JSON string
return backupJson;
}
return backupJson;
}, { includeData: options.includeData, compress: options.compress });
}
/**
 * Restore schemas (and optionally their data) from a backup produced by
 * backup(). Honors options.overwrite and options.skipExisting; per-schema
 * failures are collected in the result instead of thrown.
 * @param {string} backupJson - JSON string produced by backup()
 * @returns {Promise<{success: boolean, restored: string[], failed: string[], errors: object}>}
 */
async restore(backupJson, options = {}) {
await this.initialize();
const result = {
success: true,
restored: [],
failed: [],
errors: {},
};
try {
const backup = JSON.parse(backupJson);
// Validate backup format
if (!backup.version || !backup.schemas) {
throw new Error('Invalid backup format');
}
// Restore schemas
for (const [name, metadata] of Object.entries(backup.schemas)) {
try {
const meta = metadata;
// Check if schema exists
const existing = this.registeredSchemas.get(name);
if (existing && !options.overwrite && !options.skipExisting) {
throw new Error(`Schema ${name} already exists`);
}
if (existing && options.skipExisting) {
continue;
}
// Validate schema if requested
if (options.validateSchema) {
// Add schema validation logic here
}
// Register or update schema
await this.register(name, meta.schema, meta.options);
// Restore statistics and metadata on top of the freshly
// registered record, reviving serialized Date fields.
const restoredMeta = this.registeredSchemas.get(name);
if (restoredMeta) {
restoredMeta.statistics = meta.statistics;
restoredMeta.performance = meta.performance;
restoredMeta.createdAt = new Date(meta.createdAt);
restoredMeta.updatedAt = new Date(meta.updatedAt);
restoredMeta.lastAccessedAt = new Date(meta.lastAccessedAt);
}
result.restored.push(name);
}
catch (error) {
result.failed.push(name);
result.errors[name] = error instanceof Error ? error.message : String(error);
result.success = false;
}
}
// Restore data if present
if (backup.data) {
for (const [name, data] of Object.entries(backup.data)) {
try {
const storage = await this.getStorage(name);
if (storage && Array.isArray(data)) {
// Clear existing data if overwrite is enabled
if (options.overwrite) {
await storage.clear();
}
// Restore data items one at a time (new entityIds are generated)
for (const item of data) {
// Keep all fields except system-generated entityId
const { entityId, __version, ...itemData } = item;
await storage.create(itemData);
}
}
}
catch (error) {
// Data errors are keyed separately so they don't mask schema errors.
result.errors[`${name}_data`] = error instanceof Error ? error.message : String(error);
}
}
}
}
catch (error) {
result.success = false;
result.errors.general = error instanceof Error ? error.message : String(error);
}
return result;
}
async cleanup(inactiveDays = 30) {
const result = {
cleaned: [],
kept: [],
};
const cutoffDate = new Date();
cutoffDate.setDate(cutoffDate.getDate() - inactiveDays);
for (const [name, metadata] of this.registeredSchemas.entries()) {
if (metadata.lastAccessedAt < cutoffDate && metadata.statistics.objectCount === 0) {
// Remove inactive schema with no data
this.registeredSchemas.delete(name);
this.storageInstances.delete(name);
if (this.client) {
await this.client.hDel(this.registryKey, name);
}
result.cleaned.push(name);
}
else {
result.kept.push(name);
}
}
return result;
}
/**
 * Validate a schema against a small sample of its stored data.
 * Errors (valid=false): schema not found, storage unavailable, or an
 * exception while sampling. Warnings (non-fatal): fields present in data
 * but not in the schema, schema fields with no data, and no indexed fields.
 * @returns {Promise<{valid: boolean, errors: string[], warnings: string[]}>}
 */
async validate(name) {
await this.initialize();
const result = {
valid: true,
errors: [],
warnings: [],
};
const metadata = this.registeredSchemas.get(name);
if (!metadata) {
result.valid = false;
result.errors.push(`Schema ${name} not found`);
return result;
}
try {
const storage = await this.getStorage(name);
if (!storage) {
result.valid = false;
result.errors.push(`Failed to create storage instance for ${name}`);
return result;
}
// Check if schema fields match stored data (sample of up to 10 records)
const sampleData = await storage.find({}, { limit: 10 });
for (const item of sampleData) {
for (const fieldName of Object.keys(item)) {
// System-managed fields are exempt from the drift check.
if (fieldName !== 'entityId' &&
fieldName !== '__version' &&
fieldName !== '__createdAt' &&
fieldName !== '__updatedAt' &&
!metadata.attributes.fields[fieldName]) {
result.warnings.push(`Field ${fieldName} found in data but not in schema`);
}
}
}
// Check for unused fields (declared in schema, absent from every sample)
for (const fieldName of Object.keys(metadata.attributes.fields)) {
const hasData = sampleData.some(item => item[fieldName] !== undefined);
if (!hasData) {
result.warnings.push(`Field ${fieldName} defined in schema but no data found`);
}
}
// Validate indexes (exclude system fields)
const indexedFields = Object.entries(metadata.attributes.fields)
.filter(([name, config]) => {
// Exclude system fields from the check
const isSystemField = name.startsWith('__') || name === 'entityId';
return !isSystemField && config.indexed;
})
.map(([name]) => name);
if (indexedFields.length === 0) {
result.warnings.push('No indexed fields defined, queries may be slow');
}
}
catch (error) {
result.valid = false;
result.errors.push(error instanceof Error ? error.message : String(error));
}
return result;
}
async getStatistics() {
return performance_profiler_1.profiler.measure('registry.getStatistics', async () => {
await this.initialize();
let totalObjects = 0;
let totalOperations = 0;
let storageSize = 0;
let activeSchemas = 0;
const schemas = [];
for (const [name, metadata] of this.registeredSchemas.entries()) {
if (metadata.status === 'active') {
activeSchemas++;
}
totalObjects += metadata.statistics.objectCount;
totalOperations += metadata.statistics.totalOperations;
storageSize += metadata.statistics.totalStorageSize;
schemas.push({
name,
operations: metadata.statistics.totalOperations,
objects: metadata.statistics.objectCount,
});
}
// Sort by operations count
schemas.sort((a, b) => b.operations - a.operations);
return {
totalSchemas: this.registeredSchemas.size,
activeSchemas,
totalObjects,
totalOperations,
storageSize,
topSchemas: schemas.slice(0, 10),
};
}, { schemaCount: this.registeredSchemas.size });
}
/** @returns the SchemaVersionManager for `name`, or undefined if none was created */
getVersionManager(name) {
return this.versionManagers.get(name);
}
/**
 * Gracefully shut down the registry: stop the metrics timer, clean up
 * version managers, disconnect storage instances in parallel (5s timeout
 * each), persist metadata, close the Redis client, and clear all
 * in-memory caches so the registry can be re-initialized.
 */
async shutdown() {
// Clear the metrics interval first
if (this.metricsInterval) {
clearInterval(this.metricsInterval);
this.metricsInterval = null;
}
// Clean up version managers
for (const versionManager of this.versionManagers.values()) {
try {
await versionManager.cleanup();
}
catch (error) {
// Ignore cleanup errors
}
}
this.versionManagers.clear();
// Cancel any pending creations
this.creatingInstances.clear();
// Disconnect all storage instances in parallel with timeout
const disconnectPromises = Array.from(this.storageInstances.entries()).map(async ([name, storage]) => {
// Never let one hung disconnect block shutdown for more than 5s.
const timeout = new Promise((resolve) => setTimeout(() => resolve('timeout'), 5000));
const disconnect = storage.disconnect().catch(error => {
console.error(`Error disconnecting storage ${name}:`, error);
return 'error';
});
return Promise.race([disconnect, timeout]);
});
await Promise.all(disconnectPromises);
// Persist all schemas before shutdown (best-effort, errors swallowed per schema)
if (this.options.persistMetadata && this.client?.isOpen) {
try {
const persistPromises = Array.from(this.registeredSchemas.entries()).map(([name, metadata]) => this.persistSchema(name, metadata).catch(() => { }));
await Promise.all(persistPromises);
}
catch (error) {
console.error('Error persisting schemas:', error);
}
try {
await this.client.quit();
}
catch (error) {
console.error('Error closing Redis client:', error);
}
this.client = null;
}
// Clear instances
this.storageInstances.clear();
this.registeredSchemas.clear();
this.versionManagers.clear(); // already cleared above; harmless no-op
this.initialized = false;
}
async clearAllPersistedSchemas() {
if (!this.client?.isOpen)
return;
try {
// Delete the entire registry hash
await this.client.del(this.registryKey);
// Use cursor-based SCAN to find registry-related keys
const keyManager = (0, redis_key_manager_1.getRedisKeyManager)();
const backupPattern = `${this.registryKey}:*`;
const keys = await keyManager.getAllKeysMatching(this.client, backupPattern);
if (keys.length > 0) {
await Promise.all(keys.map((key) => this.client.del(key)));
}
}
catch (error) {
console.error('Failed to clear persisted schemas:', error);
}
}
/**
 * Unregister a schema and remove its storage instance from cache.
 * This properly cleans up both the schema and the cached instance, and
 * removes the schema's registry entries and storage keys from Redis when
 * a connection is available.
 * @returns {Promise<boolean>} true if a schema or cached instance was removed
 */
async unregister(name) {
// Get the storage instance if it exists
const storage = this.storageInstances.get(name);
// Clear and disconnect storage if it exists and is connected
if (storage) {
try {
// Check if storage has a valid connection before trying to clear
const storageAny = storage;
if (storageAny.redisClient?.isOpen || storageAny.client?.isOpen) {
await storage.clear();
}
await storage.disconnect();
}
catch (error) {
// Only warn if it's not a connection error
if (!error.message?.includes('not connected')) {
console.warn(`Error clearing storage ${name}:`, error);
}
}
}
// Remove from cache maps (this should always happen)
const hadSchema = this.registeredSchemas.delete(name);
const hadInstance = this.storageInstances.delete(name);
// Clean up Redis keys directly if we have a connection
// This ensures cleanup even if storage.clear() failed
if (this.client?.isOpen) {
try {
// Remove registry entries
await this.client.hDel(this.registryKey, name);
await this.client.hDel(`${this.registryKey}:stats`, name);
// Also clean up storage keys using cursor-based SCAN
const keyManager = (0, redis_key_manager_1.getRedisKeyManager)();
// Use default storage prefix since storageOptions is not available
const pattern = `${redis_key_manager_1.RedisKeyManager.getDefaultStoragePrefix()}:${name}:*`;
await keyManager.deleteKeysMatching(this.client, pattern);
}
catch (error) {
console.warn(`Error removing ${name} from Redis:`, error);
}
}
return hadSchema || hadInstance;
}
/**
 * Remove just the storage instance from cache (keeps schema).
 * The next getStorage(name) call will lazily create a fresh instance.
 */
removeStorageInstance(name) {
this.storageInstances.delete(name);
}
/**
* Register schema with version in single hash
*/
async registerSchemaVersion(name, version) {
if (!this.client?.isOpen)
return;
try {
// Store schema:version in single hash
await this.client.hSet(SchemaRegistry.REGISTRY_KEY, name, version.toString());
}
catch (error) {
console.error(`Failed to register schema version for ${name}:`, error);
}
}
/**
* Get all registered schemas from single hash
*/
async getAllRegisteredSchemas() {
if (!this.client?.isOpen)
return {};
try {
const data = await this.client.hGetAll(SchemaRegistry.REGISTRY_KEY);
const result = {};
// Convert string versions to numbers
for (const [name, version] of Object.entries(data)) {
result[name] = parseInt(version, 10);
}
return result;
}
catch (error) {
console.error('Failed to get registered schemas:', error);
return {};
}
}
/**
* Check if schema is registered in single hash
*/
async hasSchema(name) {
if (!this.client?.isOpen)
return false;
try {
return await this.client.hExists(SchemaRegistry.REGISTRY_KEY, name);
}
catch (error) {
console.error(`Failed to check schema ${name}:`, error);
return false;
}
}
/**
* Get schema version from single hash
*/
async getSchemaVersion(name) {
if (!this.client?.isOpen)
return null;
try {
const version = await this.client.hGet(SchemaRegistry.REGISTRY_KEY, name);
return version ? parseInt(version, 10) : null;
}
catch (error) {
console.error(`Failed to get schema version for ${name}:`, error);
return null;
}
}
/**
* Remove schema from single hash
*/
async removeSchemaFromRegistry(name) {
if (!this.client?.isOpen)
return false;
try {
const removed = await this.client.hDel(SchemaRegistry.REGISTRY_KEY, name);
return removed > 0;
}
catch (error) {
console.error(`Failed to remove schema ${name} from registry:`, error);
return false;
}
}
}
exports.SchemaRegistry = SchemaRegistry;
// Export singleton getter
/**
 * Module-level accessor for the SchemaRegistry singleton.
 * Note: `options` only takes effect on the first call (when the singleton
 * is constructed); later calls return the existing instance unchanged.
 */
function getSchemaRegistry(options) {
return SchemaRegistry.getInstance(options);
}
//# sourceMappingURL=schema-registry.js.map