// Package: @yihuangdb/storage-object
// A Node.js storage object layer library using Redis OM
// (npm listing metadata: 383 lines / 361 loc, 15.8 kB, JavaScript)
"use strict";
/**
* Storage Version Manager
*
* Manages version tracking for both schema and data changes,
* enabling incremental sync and change tracking across nodes.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.StorageVersionManager = void 0;
class StorageVersionManager {
    // Key-name builder: supplies the Redis key strings used below
    // (storageSchemaVersion, storageDataVersion, storageSchemaChangelog,
    // storageDataChangelog, dirtyEntitiesSet, syncState, nodeLastSeen, syncProgress).
    keyManager;
    // Connected node-redis v4 client (camelCase API: get/set/eval/zRangeByScore/...).
    redis;

    /**
     * @param {object} keyManager Key-name builder for all Redis keys.
     * @param {object} redis Connected node-redis v4 client.
     */
    constructor(keyManager, redis) {
        this.keyManager = keyManager;
        this.redis = redis;
    }

    /**
     * Initialize the version manager by setting up initial version counters if needed.
     * Schema version starts at 1, storage (data) version at 0. Existing counters
     * are never overwritten.
     */
    async initialize() {
        const [schemaVer, storageVer] = await Promise.all([
            this.redis.get(this.keyManager.storageSchemaVersion()),
            this.redis.get(this.keyManager.storageDataVersion())
        ]);
        if (!schemaVer) {
            await this.redis.set(this.keyManager.storageSchemaVersion(), '1');
        }
        if (!storageVer) {
            await this.redis.set(this.keyManager.storageDataVersion(), '0');
        }
    }

    // === Schema Version Management ===

    /**
     * @returns {Promise<number>} Current schema version (defaults to 1 when unset).
     */
    async getCurrentSchemaVersion() {
        const schemaVer = await this.redis.get(this.keyManager.storageSchemaVersion());
        // Explicit radix: never rely on parseInt's default base detection.
        return parseInt(schemaVer || '1', 10);
    }

    /**
     * Atomically bump the schema version and append a changelog entry.
     *
     * @param {object} schemaChanges Arbitrary JSON-serializable change description;
     *   the script stamps it with `schemaVersionNum` and `timestamp`.
     * @returns {Promise<number>} The newly assigned schema version.
     */
    async incrementSchemaVersion(schemaChanges) {
        // Lua script keeps INCR + ZADD atomic so concurrent writers cannot
        // interleave version assignment and changelog insertion.
        const luaScript = `
            local schemaVersionKey = KEYS[1]
            local changelogKey = KEYS[2]
            local changeData = ARGV[1]
            local newVersion = redis.call('INCR', schemaVersionKey)
            local changeEntry = cjson.decode(changeData)
            changeEntry.schemaVersionNum = newVersion
            changeEntry.timestamp = tonumber(ARGV[2])
            redis.call('ZADD', changelogKey, newVersion, cjson.encode(changeEntry))
            return newVersion
        `;
        const newSchemaVersion = await this.redis.eval(luaScript, {
            keys: [
                this.keyManager.storageSchemaVersion(),
                this.keyManager.storageSchemaChangelog()
            ],
            arguments: [
                JSON.stringify(schemaChanges),
                Date.now().toString()
            ]
        });
        return newSchemaVersion;
    }

    /**
     * @param {number} fromSchemaVersion Exclusive lower bound.
     * @returns {Promise<object[]>} Parsed changelog entries with version > fromSchemaVersion.
     */
    async getSchemaChangesSince(fromSchemaVersion) {
        const changes = await this.redis.zRangeByScore(this.keyManager.storageSchemaChangelog(), fromSchemaVersion + 1, '+inf');
        return changes.map(c => JSON.parse(c));
    }

    // === Storage Version Management ===

    /**
     * @returns {Promise<number>} Current storage (data) version (defaults to 0 when unset).
     */
    async getCurrentStorageVersion() {
        const storageVer = await this.redis.get(this.keyManager.storageDataVersion());
        return parseInt(storageVer || '0', 10);
    }

    /**
     * Record a single entity change: atomically bump the storage version,
     * append a changelog entry, and mark the entity dirty.
     *
     * @param {string} entityId Entity identifier.
     * @param {string} operation Operation label (e.g. 'create'/'update'/'delete').
     * @returns {Promise<number>} The storage version assigned to this change.
     */
    async trackStorageChange(entityId, operation) {
        // Single Lua script => INCR, changelog ZADD and dirty-set SADD are one
        // atomic unit; no other client can observe a partial update.
        const luaScript = `
            local storageVersionKey = KEYS[1]
            local schemaVersionKey = KEYS[2]
            local changelogKey = KEYS[3]
            local dirtySetKey = KEYS[4]
            local entityId = ARGV[1]
            local operation = ARGV[2]
            local timestamp = ARGV[3]
            -- Atomically increment storage version
            local newStorageVersion = redis.call('INCR', storageVersionKey)
            -- Get current schema version (defaults to '1' when unset)
            local schemaVersion = redis.call('GET', schemaVersionKey)
            if not schemaVersion then
                schemaVersion = '1'
            end
            -- Create change entry
            local changeEntry = {
                storageVersionNum = newStorageVersion,
                schemaVersionNum = tonumber(schemaVersion),
                operation = operation,
                entityId = entityId,
                timestamp = tonumber(timestamp)
            }
            -- Add to changelog and dirty set atomically
            redis.call('ZADD', changelogKey, newStorageVersion, cjson.encode(changeEntry))
            redis.call('SADD', dirtySetKey, entityId)
            return newStorageVersion
        `;
        const newStorageVersion = await this.redis.eval(luaScript, {
            keys: [
                this.keyManager.storageDataVersion(),
                this.keyManager.storageSchemaVersion(),
                this.keyManager.storageDataChangelog(),
                this.keyManager.dirtyEntitiesSet()
            ],
            arguments: [
                entityId,
                operation,
                Date.now().toString()
            ]
        });
        return newStorageVersion;
    }

    /**
     * Record many entity changes in one round trip. A contiguous version range
     * is reserved with a single INCRBY, so assigned versions have no gaps, and
     * changelog/dirty-set writes are each a single variadic call.
     *
     * NOTE(review): the script expands arguments with Lua 5.1 `unpack()`, which is
     * bounded by the interpreter's C stack (~8000 elements) — very large batches
     * (thousands of changes) may need chunking at the caller. TODO confirm against
     * expected batch sizes.
     *
     * @param {{entityId: string, operation: string}[]} changes Ordered change list.
     * @returns {Promise<number[]>} Versions assigned, in input order ([] for empty input).
     */
    async trackBatchStorageChanges(changes) {
        if (changes.length === 0)
            return [];
        const luaScript = `
            local storageVersionKey = KEYS[1]
            local schemaVersionKey = KEYS[2]
            local changelogKey = KEYS[3]
            local dirtySetKey = KEYS[4]
            local changesJson = ARGV[1]
            local timestamp = ARGV[2]
            -- Parse changes array
            local changes = cjson.decode(changesJson)
            local numChanges = #changes
            -- Get current schema version once
            local schemaVersion = redis.call('GET', schemaVersionKey)
            if not schemaVersion then
                schemaVersion = '1'
            end
            schemaVersion = tonumber(schemaVersion)
            -- Atomically reserve version range (single operation instead of N operations)
            local startVersion = tonumber(redis.call('GET', storageVersionKey) or '0')
            local endVersion = redis.call('INCRBY', storageVersionKey, numChanges)
            -- Build batch arguments for ZADD (single call instead of N calls)
            local zaddArgs = {}
            local entityIds = {}
            for i, change in ipairs(changes) do
                -- Assign version from reserved range (no gaps!)
                local assignedVersion = startVersion + i
                -- Create change entry
                local changeEntry = {
                    storageVersionNum = assignedVersion,
                    schemaVersionNum = schemaVersion,
                    operation = change.operation,
                    entityId = change.entityId,
                    timestamp = tonumber(timestamp) + i - 1
                }
                -- Add to ZADD arguments (score, member pairs)
                table.insert(zaddArgs, assignedVersion)
                table.insert(zaddArgs, cjson.encode(changeEntry))
                -- Collect entity IDs
                table.insert(entityIds, change.entityId)
            end
            -- Single ZADD with all entries (1 operation instead of N)
            if #zaddArgs > 0 then
                redis.call('ZADD', changelogKey, unpack(zaddArgs))
            end
            -- Single SADD with all entity IDs (1 operation instead of N)
            if #entityIds > 0 then
                redis.call('SADD', dirtySetKey, unpack(entityIds))
            end
            -- Return assigned versions
            local versions = {}
            for i = 1, numChanges do
                table.insert(versions, startVersion + i)
            end
            return versions
        `;
        const versions = await this.redis.eval(luaScript, {
            keys: [
                this.keyManager.storageDataVersion(),
                this.keyManager.storageSchemaVersion(),
                this.keyManager.storageDataChangelog(),
                this.keyManager.dirtyEntitiesSet()
            ],
            arguments: [
                JSON.stringify(changes),
                Date.now().toString()
            ]
        });
        return versions;
    }

    /**
     * Fetch changelog entries in (fromStorageVersion, toStorageVersion].
     *
     * @param {number} fromStorageVersion Exclusive lower bound.
     * @param {number} [toStorageVersion] Inclusive upper bound; unbounded when omitted.
     *   Uses `??` so an explicit 0 is honored rather than treated as "+inf".
     * @returns {Promise<object[]>} Parsed change entries.
     */
    async getStorageChangesBetween(fromStorageVersion, toStorageVersion) {
        const toVersion = toStorageVersion ?? '+inf';
        const changes = await this.redis.zRangeByScore(this.keyManager.storageDataChangelog(), fromStorageVersion + 1, toVersion);
        return changes.map(c => JSON.parse(c));
    }

    /**
     * Full delta from a known version to "now": all changes plus the current
     * storage and schema version numbers.
     *
     * @param {number} fromStorageVersion Exclusive lower bound.
     */
    async getStorageVersionDelta(fromStorageVersion) {
        const [storageChanges, currentStorageVer, currentSchemaVer] = await Promise.all([
            this.getStorageChangesBetween(fromStorageVersion),
            this.getCurrentStorageVersion(),
            this.getCurrentSchemaVersion()
        ]);
        return {
            fromStorageVersion,
            toStorageVersion: currentStorageVer,
            currentSchemaVersion: currentSchemaVer,
            storageChanges
        };
    }

    // === Batch Operations ===

    /**
     * Fetch one page of changelog entries.
     *
     * @param {number} fromStorageVersion Exclusive lower bound of the overall sync.
     * @param {number} [batchSize=100] Max versions covered by this page.
     * @param {number} [batchCursor] Version at which this page starts; falls back to
     *   fromStorageVersion + 1 when absent (0 — the done-sentinel — also falls back).
     * @returns {Promise<object>} Page plus pagination info; `nextBatchCursor` is 0
     *   when no batches remain (kept as 0, not null, for caller compatibility).
     */
    async getStorageVersionBatch(fromStorageVersion, batchSize = 100, batchCursor) {
        const currentStorageVer = await this.getCurrentStorageVersion();
        const batchStartVersion = batchCursor || fromStorageVersion + 1;
        const batchEndVersion = Math.min(batchStartVersion + batchSize - 1, currentStorageVer);
        const changes = await this.redis.zRangeByScore(this.keyManager.storageDataChangelog(), batchStartVersion, batchEndVersion);
        const totalChanges = await this.redis.zCount(this.keyManager.storageDataChangelog(), fromStorageVersion + 1, currentStorageVer);
        return {
            fromStorageVersion: batchStartVersion - 1,
            toStorageVersion: batchEndVersion,
            storageChanges: changes.map(c => JSON.parse(c)),
            hasMoreBatches: batchEndVersion < currentStorageVer,
            nextBatchCursor: batchEndVersion < currentStorageVer ? batchEndVersion + 1 : 0,
            totalChangesCount: totalChanges,
            currentBatchSize: changes.length
        };
    }

    // === Sync State Management ===

    /**
     * @param {string} nodeId
     * @returns {Promise<object>} Stored sync state, or a 'never_synced' default
     *   (storage version 0, schema version 1) when none exists.
     */
    async getNodeSyncState(nodeId) {
        const stateJson = await this.redis.get(this.keyManager.syncState(nodeId));
        if (!stateJson) {
            return {
                nodeId,
                lastSyncedStorageVersion: 0,
                lastSyncedSchemaVersion: 1,
                lastSyncTimestamp: null,
                syncStatus: 'never_synced'
            };
        }
        return JSON.parse(stateJson);
    }

    /**
     * Persist a node's sync state and refresh its last-seen timestamp.
     *
     * @param {string} nodeId
     * @param {number} syncedStorageVersion
     * @param {number} syncedSchemaVersion
     */
    async updateNodeSyncState(nodeId, syncedStorageVersion, syncedSchemaVersion) {
        const state = {
            nodeId,
            lastSyncedStorageVersion: syncedStorageVersion,
            lastSyncedSchemaVersion: syncedSchemaVersion,
            lastSyncTimestamp: new Date().toISOString(),
            syncStatus: 'synced'
        };
        await this.redis.set(this.keyManager.syncState(nodeId), JSON.stringify(state));
        await this.redis.set(this.keyManager.nodeLastSeen(nodeId), Date.now().toString());
    }

    // === Progressive Sync ===

    /**
     * Begin a progressive sync session for a node.
     *
     * @param {string} nodeId
     * @param {number} fromStorageVersion Exclusive starting version.
     * @param {number} [batchSize=100] Versions per batch. Stored in the progress
     *   record so getNextSyncBatch pages with the SAME size used to compute
     *   totalBatchesCount (previously it always paged with 100, skewing the
     *   batch count and ETA for any other size).
     * @returns {Promise<string>} Sync session id (expires after 1 hour of inactivity).
     */
    async startProgressiveSync(nodeId, fromStorageVersion, batchSize = 100) {
        const syncId = `${nodeId}-${Date.now()}`;
        const currentStorageVer = await this.getCurrentStorageVersion();
        const totalChanges = await this.redis.zCount(this.keyManager.storageDataChangelog(), fromStorageVersion + 1, currentStorageVer);
        const totalBatches = Math.ceil(totalChanges / batchSize);
        const progress = {
            nodeId,
            totalStorageVersions: currentStorageVer - fromStorageVersion,
            syncedStorageVersions: 0,
            currentBatchNumber: 0,
            totalBatchesCount: totalBatches,
            batchSize,
            syncStartTime: Date.now()
        };
        await this.redis.setEx(this.keyManager.syncProgress(syncId), 3600, // 1 hour TTL
        JSON.stringify(progress));
        return syncId;
    }

    /**
     * Fetch the next batch for a sync session and update its progress/ETA.
     *
     * @param {string} syncId Session id from startProgressiveSync.
     * @throws {Error} When the session is unknown or its TTL expired.
     */
    async getNextSyncBatch(syncId) {
        const progressKey = this.keyManager.syncProgress(syncId);
        const progressJson = await this.redis.get(progressKey);
        if (!progressJson) {
            throw new Error(`Sync session ${syncId} not found or expired`);
        }
        const progress = JSON.parse(progressJson);
        const syncState = await this.getNodeSyncState(progress.nodeId);
        // Use the session's own batch size; fall back to 100 for progress
        // records written before batchSize was persisted.
        const batch = await this.getStorageVersionBatch(syncState.lastSyncedStorageVersion + progress.syncedStorageVersions, progress.batchSize || 100);
        progress.syncedStorageVersions += batch.currentBatchSize;
        progress.currentBatchNumber++;
        const elapsed = Date.now() - progress.syncStartTime;
        const avgTimePerBatch = elapsed / progress.currentBatchNumber;
        progress.estimatedTimeRemaining = Math.round(avgTimePerBatch * (progress.totalBatchesCount - progress.currentBatchNumber));
        await this.redis.setEx(progressKey, 3600, JSON.stringify(progress));
        return batch;
    }

    /**
     * Mark a batch applied: persist the node's new sync state and, when all
     * versions are synced, delete the session.
     *
     * @param {string} syncId
     * @param {number} batchStorageVersion Highest storage version applied by this batch.
     * @returns {Promise<boolean>} true when the whole sync completed, false otherwise
     *   (including when the session no longer exists).
     */
    async completeSyncBatch(syncId, batchStorageVersion) {
        const progressKey = this.keyManager.syncProgress(syncId);
        const progressJson = await this.redis.get(progressKey);
        if (!progressJson)
            return false;
        const progress = JSON.parse(progressJson);
        await this.updateNodeSyncState(progress.nodeId, batchStorageVersion, await this.getCurrentSchemaVersion());
        if (progress.syncedStorageVersions >= progress.totalStorageVersions) {
            await this.redis.del(progressKey);
            return true; // Sync complete
        }
        return false; // More batches remain
    }

    // === Cleanup ===

    /**
     * Trim the oldest changelog entries, keeping the newest keepLastNVersions.
     * Only compacts once the log exceeds twice the keep threshold, so compaction
     * runs in bulk rather than on every call.
     *
     * @param {number} [keepLastNVersions=10000]
     * @returns {Promise<number>} Number of entries removed (0 when below threshold).
     */
    async compactStorageChangelog(keepLastNVersions = 10000) {
        const totalVersions = await this.redis.zCard(this.keyManager.storageDataChangelog());
        if (totalVersions > keepLastNVersions * 2) {
            const removeCount = totalVersions - keepLastNVersions;
            await this.redis.zRemRangeByRank(this.keyManager.storageDataChangelog(), 0, removeCount - 1);
            return removeCount;
        }
        return 0;
    }

    /**
     * @returns {Promise<string[]>} Ids of all entities currently marked dirty.
     */
    async getDirtyEntities() {
        return await this.redis.sMembers(this.keyManager.dirtyEntitiesSet());
    }

    /**
     * Clear dirty markers: only the given ids when provided (non-empty),
     * otherwise drop the whole dirty set.
     *
     * @param {string[]} [entityIds]
     */
    async clearDirtyEntities(entityIds) {
        if (entityIds && entityIds.length > 0) {
            await this.redis.sRem(this.keyManager.dirtyEntitiesSet(), entityIds);
        }
        else {
            await this.redis.del(this.keyManager.dirtyEntitiesSet());
        }
    }

    // === Export Range Methods ===

    /**
     * Stream changelog entries in (fromStorageVersion, toStorageVersion] as text.
     * Reads in pages of 1000 to bound memory.
     *
     * @param {number} fromStorageVersion Exclusive lower bound.
     * @param {number} [toStorageVersion] Inclusive upper bound; defaults to the
     *   current storage version (`??` so an explicit 0 is honored).
     * @param {'json'|'ndjson'|'csv'} [format='json'] 'csv' emits a header row;
     *   'json' emits one `{...},\n` per entry (caller wraps/trims the trailing comma).
     * @yields {string} One formatted line (or the CSV header) at a time.
     */
    async *exportStorageVersionRange(fromStorageVersion, toStorageVersion, format = 'json') {
        const endVersion = toStorageVersion ?? await this.getCurrentStorageVersion();
        const batchSize = 1000;
        let cursor = fromStorageVersion + 1;
        if (format === 'csv') {
            yield 'storageVersion,schemaVersion,operation,entityId,timestamp\n';
        }
        while (cursor <= endVersion) {
            const batchEnd = Math.min(cursor + batchSize - 1, endVersion);
            const changes = await this.redis.zRangeByScore(this.keyManager.storageDataChangelog(), cursor, batchEnd);
            for (const change of changes) {
                const parsed = JSON.parse(change);
                switch (format) {
                    case 'csv':
                        yield `${parsed.storageVersionNum},${parsed.schemaVersionNum},${parsed.operation},${parsed.entityId},${parsed.timestamp}\n`;
                        break;
                    case 'ndjson':
                        yield JSON.stringify(parsed) + '\n';
                        break;
                    default:
                        yield JSON.stringify(parsed) + ',\n';
                }
            }
            cursor = batchEnd + 1;
        }
    }

    // === Consolidated Changes ===

    /**
     * Group changelog entries by entity id, preserving per-entity version order.
     *
     * @param {number} fromStorageVersion Exclusive lower bound.
     * @param {number} [toStorageVersion] Inclusive upper bound; defaults to the
     *   current storage version (`??` so an explicit 0 is honored).
     * @returns {Promise<Map<string, object[]>>} entityId -> ordered change entries.
     */
    async getConsolidatedChangesByEntity(fromStorageVersion, toStorageVersion) {
        const endVersion = toStorageVersion ?? await this.getCurrentStorageVersion();
        const changes = await this.redis.zRangeByScore(this.keyManager.storageDataChangelog(), fromStorageVersion + 1, endVersion);
        const grouped = new Map();
        for (const change of changes) {
            const parsed = JSON.parse(change);
            if (!grouped.has(parsed.entityId)) {
                grouped.set(parsed.entityId, []);
            }
            grouped.get(parsed.entityId).push(parsed);
        }
        return grouped;
    }
}
exports.StorageVersionManager = StorageVersionManager;
//# sourceMappingURL=storage-version-manager.js.map