lmdb

Simple, efficient, scalable, high-performance LMDB interface

1,581 lines (1,551 loc) 107 kB
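The listing below is the package's bundled CommonJS build. For orientation, here is a minimal usage sketch of the documented lmdb API that these internals implement; the database path, keys, and values are illustrative only.

const { open } = require('lmdb');

const db = open({ path: 'my-db', compression: true });

async function demo() {
  // put() queues an asynchronous write; the promise resolves to true once the
  // transaction containing the write has committed
  await db.put('greeting', { someValue: 'Hello, World!' });

  // get() is synchronous, reading directly from the memory-mapped data
  console.log(db.get('greeting').someValue); // 'Hello, World!'

  await db.remove('greeting');
}
demo();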
'use strict'; Object.defineProperty(exports, '__esModule', { value: true }); var module$1 = require('module'); var pathModule = require('path'); var url = require('url'); var loadNAPI = require('node-gyp-build-optional-packages'); var extendedIterable = require('@harperfast/extended-iterable'); var events = require('events'); var os$1 = require('os'); var fs$1 = require('fs'); var msgpackr = require('msgpackr'); var weakLruCache = require('weak-lru-cache'); var orderedBinary$1 = require('ordered-binary'); function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; } function _interopNamespace(e) { if (e && e.__esModule) return e; var n = Object.create(null); if (e) { Object.keys(e).forEach(function (k) { if (k !== 'default') { var d = Object.getOwnPropertyDescriptor(e, k); Object.defineProperty(n, k, d.get ? d : { enumerable: true, get: function () { return e[k]; } }); } }); } n["default"] = e; return Object.freeze(n); } var pathModule__default = /*#__PURE__*/_interopDefaultLegacy(pathModule); var loadNAPI__default = /*#__PURE__*/_interopDefaultLegacy(loadNAPI); var fs__default = /*#__PURE__*/_interopDefaultLegacy(fs$1); var orderedBinary__namespace = /*#__PURE__*/_interopNamespace(orderedBinary$1); let Env, Txn, Dbi, Compression, Cursor, getAddress, getBufferAddress; exports.clearKeptObjects = void 0; let globalBuffer, setGlobalBuffer, arch$1, fs, os, isLittleEndian, tmpdir, lmdbError, path, EventEmitter, orderedBinary, MsgpackrEncoder, WeakLRUCache, getByBinary, startRead, setReadCallback, write, position, iterate, prefetch, resetTxn, getCurrentValue, getStringByBinary, getSharedBuffer, compress, directWrite, getUserSharedBuffer, notifyUserCallbacks, attemptLock, unlock; exports.version = void 0; path = pathModule__default["default"]; let dirName = pathModule.dirname(url.fileURLToPath((typeof document === 'undefined' ? 
new (require('u' + 'rl').URL)('file:' + __filename).href : (document.currentScript && document.currentScript.src || new URL('index.cjs', document.baseURI).href)))).replace(/dist$/, ''); let nativeAddon = loadNAPI__default["default"](dirName); if (process.isBun && false) { const { linkSymbols, FFIType } = require('bun:ffi'); let lmdbLib = linkSymbols({ getByBinary: { args: [FFIType.f64, FFIType.u32], returns: FFIType.u32, ptr: nativeAddon.getByBinaryPtr, }, iterate: { args: [FFIType.f64], returns: FFIType.i32, ptr: nativeAddon.iteratePtr, }, position: { args: [FFIType.f64, FFIType.u32, FFIType.u32, FFIType.u32, FFIType.f64], returns: FFIType.i32, ptr: nativeAddon.positionPtr, }, write: { args: [FFIType.f64, FFIType.f64], returns: FFIType.i32, ptr: nativeAddon.writePtr, }, resetTxn: { args: [FFIType.f64], returns: FFIType.void, ptr: nativeAddon.resetTxnPtr, }, }); for (let key in lmdbLib.symbols) { nativeAddon[key] = lmdbLib.symbols[key].native; } } setNativeFunctions(nativeAddon); function setNativeFunctions(externals) { Env = externals.Env; Txn = externals.Txn; Dbi = externals.Dbi; Compression = externals.Compression; getAddress = externals.getAddress; getBufferAddress = externals.getBufferAddress; externals.createBufferForAddress; exports.clearKeptObjects = externals.clearKeptObjects || function () {}; getByBinary = externals.getByBinary; externals.detachBuffer; startRead = externals.startRead; setReadCallback = externals.setReadCallback; setGlobalBuffer = externals.setGlobalBuffer; globalBuffer = externals.globalBuffer; getSharedBuffer = externals.getSharedBuffer; prefetch = externals.prefetch; iterate = externals.iterate; position = externals.position; resetTxn = externals.resetTxn; directWrite = externals.directWrite; getUserSharedBuffer = externals.getUserSharedBuffer; notifyUserCallbacks = externals.notifyUserCallbacks; attemptLock = externals.attemptLock; unlock = externals.unlock; getCurrentValue = externals.getCurrentValue; externals.getCurrentShared; getStringByBinary = externals.getStringByBinary; externals.getSharedByBinary; write = externals.write; compress = externals.compress; Cursor = externals.Cursor; lmdbError = externals.lmdbError; exports.version = externals.version; if (externals.tmpdir) tmpdir = externals.tmpdir; } function setExternals(externals) { arch$1 = externals.arch; fs = externals.fs; EventEmitter = externals.EventEmitter; orderedBinary = externals.orderedBinary; MsgpackrEncoder = externals.MsgpackrEncoder; WeakLRUCache = externals.WeakLRUCache; tmpdir = externals.tmpdir; os = externals.os; externals.onExit; isLittleEndian = externals.isLittleEndian; } function when(promise, callback, errback) { if (promise && promise.then) { return errback ? 
promise.then(callback, errback) : promise.then(callback); } return callback(promise); } var backpressureArray; const WAITING_OPERATION = 0x2000000; const BACKPRESSURE_THRESHOLD = 300000; const TXN_DELIMITER = 0x8000000; const TXN_COMMITTED = 0x10000000; const TXN_FLUSHED = 0x20000000; const TXN_FAILED = 0x40000000; const FAILED_CONDITION = 0x4000000; const REUSE_BUFFER_MODE$1 = 512; const RESET_BUFFER_MODE = 1024; const NO_RESOLVE = 16; const HAS_TXN = 8; const CONDITIONAL_VERSION_LESS_THAN = 0x800; const CONDITIONAL_ALLOW_NOTFOUND = 0x800; const SYNC_PROMISE_SUCCESS = Promise.resolve(true); const SYNC_PROMISE_FAIL = Promise.resolve(false); SYNC_PROMISE_SUCCESS.isSync = true; SYNC_PROMISE_SUCCESS.result = true; SYNC_PROMISE_FAIL.isSync = true; SYNC_PROMISE_FAIL.result = false; const PROMISE_SUCCESS = Promise.resolve(true); const arch = process.arch; const ABORT = 4.452694326329068e-106; // random/unguessable numbers, which work across module/versions and native const IF_EXISTS$1 = 3.542694326329068e-103; const CALLBACK_THREW = {}; const LocalSharedArrayBuffer = typeof Deno != 'undefined' || // Deno can't handle SharedArrayBuffer as an FFI // argument due to https://github.com/denoland/deno/issues/12678 typeof SharedArrayBuffer == 'undefined' // Sometimes electron doesn't have a SharedArrayBuffer ? ArrayBuffer : SharedArrayBuffer; const ByteArray = typeof Buffer != 'undefined' ? function (buffer) { return Buffer.from(buffer); } : Uint8Array; const queueTask = typeof setImmediate != 'undefined' ? setImmediate : setTimeout; // TODO: Or queueMicrotask? //let debugLog = [] const WRITE_BUFFER_SIZE = 0x10000; function addWriteMethods( LMDBStore, { env, fixedBuffer, resetReadTxn, useWritemap, maxKeySize, eventTurnBatching, txnStartThreshold, batchStartThreshold, overlappingSync, commitDelay, separateFlushed, maxFlushDelay, }, ) { // stands for write instructions var dynamicBytes; function allocateInstructionBuffer(lastPosition) { // Must use a shared buffer on older node in order to use Atomics, and it is also more correct since we are // indeed accessing and modifying it from another thread (in C). However, Deno can't handle it for // FFI so aliased above let buffer = new LocalSharedArrayBuffer(WRITE_BUFFER_SIZE); let lastBytes = dynamicBytes; dynamicBytes = new ByteArray(buffer); let uint32 = (dynamicBytes.uint32 = new Uint32Array( buffer, 0, WRITE_BUFFER_SIZE >> 2, )); uint32[2] = 0; dynamicBytes.float64 = new Float64Array(buffer, 0, WRITE_BUFFER_SIZE >> 3); buffer.address = getBufferAddress(dynamicBytes); uint32.address = buffer.address + uint32.byteOffset; dynamicBytes.position = 1; // we start at position 1 to save space for writing the txn id before the txn delimiter if (lastPosition) { lastBytes.float64[lastPosition + 1] = dynamicBytes.uint32.address + (dynamicBytes.position << 3); lastBytes.uint32[lastPosition << 1] = 3; // pointer instruction } return dynamicBytes; } var newBufferThreshold = (WRITE_BUFFER_SIZE - maxKeySize - 64) >> 3; // need to reserve more room if we do inline values var outstandingWriteCount = 0; var startAddress = 0; var writeTxn = null; var committed; var abortedNonChildTransactionWarn; var nextTxnCallbacks = []; var commitPromise, flushPromise, flushResolvers = [], batchFlushResolvers = []; commitDelay = commitDelay || 0; eventTurnBatching = eventTurnBatching === false ? 
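
// --- illustrative usage sketch (not part of the bundled source) ---
// The options destructured by addWriteMethods above (commitDelay, overlappingSync,
// eventTurnBatching, useWritemap, ...) correspond to documented open() options.
// A rough sketch of tuning them; the path and values are illustrative, not
// recommendations:
const { open } = require('lmdb');
const tunedDb = open({
  path: 'tuned-db',
  commitDelay: 0,          // ms to wait before starting the batched write transaction
  eventTurnBatching: true, // group all writes from one event turn into one transaction
  overlappingSync: true,   // let the next commit proceed while the previous one flushes
});
tunedDb.put('key', 'value');
// --- end sketch ---
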
false : true; var enqueuedCommit; var afterCommitCallbacks = []; var beforeCommitCallbacks = []; var enqueuedEventTurnBatch; var batchDepth = 0; var lastWritePromise; var writeBatchStart, outstandingBatchCount, lastSyncTxnFlush; var hasUnresolvedTxns; txnStartThreshold = txnStartThreshold || 5; batchStartThreshold = batchStartThreshold || 1000; maxFlushDelay = maxFlushDelay || 500; allocateInstructionBuffer(); dynamicBytes.uint32[2] = TXN_DELIMITER | TXN_COMMITTED | TXN_FLUSHED; var txnResolution, nextResolution = { uint32: dynamicBytes.uint32, flagPosition: 2, flag: 0, valueBuffer: null, next: null, meta: null, }; var uncommittedResolution = { uint32: null, flagPosition: 2, flag: 0, valueBuffer: null, next: nextResolution, meta: null, }; var unwrittenResolution = nextResolution; var lastPromisedResolution = uncommittedResolution; var lastQueuedResolution = uncommittedResolution; function writeInstructions(flags, store, key, value, version, ifVersion) { let writeStatus; let targetBytes, position, encoder; let valueSize, valueBuffer, valueBufferStart; if (flags & 2) { // encode first in case we have to write a shared structure encoder = store.encoder; if (value && value['\x10binary-data\x02']) valueBuffer = value['\x10binary-data\x02']; else if (encoder) { if (encoder.copyBuffers) // use this as indicator for support buffer reuse for now valueBuffer = encoder.encode( value, REUSE_BUFFER_MODE$1 | (writeTxn ? RESET_BUFFER_MODE : 0), ); // in addition, if we are writing sync, after using, we can immediately reset the encoder's position to reuse that space, which can improve performance else { // various other encoders, including JSON.stringify, that might serialize to a string valueBuffer = encoder.encode(value); if (typeof valueBuffer == 'string') valueBuffer = Buffer.from(valueBuffer); // TODO: Would be nice to write strings inline in the instructions } } else if (typeof value == 'string') { valueBuffer = Buffer.from(value); // TODO: Would be nice to write strings inline in the instructions } else if (value instanceof Uint8Array) valueBuffer = value; else throw new Error( 'Invalid value to put in database ' + value + ' (' + typeof value + '), consider using encoder', ); valueBufferStart = valueBuffer.start; if (valueBufferStart > -1) // if we have buffers with start/end position valueSize = valueBuffer.end - valueBufferStart; // size else valueSize = valueBuffer.length; if (store.dupSort && valueSize > maxKeySize) throw new Error( 'The value is larger than the maximum size (' + maxKeySize + ') for a value in a dupSort database', ); } else valueSize = 0; if (writeTxn) { targetBytes = fixedBuffer; position = 0; } else { if (eventTurnBatching && !enqueuedEventTurnBatch && batchDepth == 0) { enqueuedEventTurnBatch = queueTask(() => { try { for (let i = 0, l = beforeCommitCallbacks.length; i < l; i++) { try { beforeCommitCallbacks[i](); } catch (error) { console.error('In beforecommit callback', error); } } } catch (error) { console.error(error); } enqueuedEventTurnBatch = null; batchDepth--; finishBatch(); if (writeBatchStart) writeBatchStart(); // TODO: When we support delay start of batch, optionally don't delay this }); commitPromise = null; // reset the commit promise, can't know if it is really a new transaction prior to finishWrite being called flushPromise = null; writeBatchStart = writeInstructions(1, store); outstandingBatchCount = 0; batchDepth++; } targetBytes = dynamicBytes; position = targetBytes.position; } let uint32 = targetBytes.uint32, float64 = targetBytes.float64; let 
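
// --- illustrative usage sketch (not part of the bundled source) ---
// writeInstructions above encodes values with the store's encoder (msgpackr by
// default), accepts strings and Uint8Arrays directly, and throws for anything
// else. The encoder is chosen by the encoding option to open(); paths and keys
// here are illustrative:
const { open } = require('lmdb');

const jsonDb = open({ path: 'json-db', encoding: 'json' });       // values pass through JSON.stringify
jsonDb.put('settings', { theme: 'dark' });

const stringDb = open({ path: 'string-db', encoding: 'string' }); // plain UTF-8 strings
stringDb.put('motd', 'hello');

const binaryDb = open({ path: 'binary-db', encoding: 'binary' }); // raw buffers, stored as-is
binaryDb.put('blob', Buffer.from([0xde, 0xad]));
// --- end sketch ---
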
flagPosition = position << 1; // flagPosition is the 32-bit word starting position // don't increment position until we are sure we don't have any key writing errors if (!uint32) { throw new Error('Internal buffers have been corrupted'); } uint32[flagPosition + 1] = store.db.dbi; if (flags & 4) { let keyStartPosition = (position << 3) + 12; let endPosition; try { endPosition = store.writeKey(key, targetBytes, keyStartPosition); if (!(keyStartPosition < endPosition) && (flags & 0xf) != 12) throw new Error( 'Invalid key or zero length key is not allowed in LMDB ' + key, ); } catch (error) { targetBytes.fill(0, keyStartPosition); if (error.name == 'RangeError') error = new Error( 'Key size is larger than the maximum key size (' + maxKeySize + ')', ); throw error; } let keySize = endPosition - keyStartPosition; if (keySize > maxKeySize) { targetBytes.fill(0, keyStartPosition); // restore zeros throw new Error( 'Key size is larger than the maximum key size (' + maxKeySize + ')', ); } uint32[flagPosition + 2] = keySize; position = (endPosition + 16) >> 3; if (flags & 2) { let mustCompress; if (valueBufferStart > -1) { // if we have buffers with start/end position // record pointer to value buffer float64[position] = (valueBuffer.address || (valueBuffer.address = getAddress(valueBuffer.buffer))) + valueBufferStart; if (store.compression) { let compressionFlagIndex = valueBufferStart + (store.compression.startingOffset || 0); // this is the compression indicator, so we must compress mustCompress = compressionFlagIndex < valueBuffer.end && valueBuffer[compressionFlagIndex] >= 250; } } else { let valueArrayBuffer = valueBuffer.buffer; // record pointer to value buffer let address = (valueArrayBuffer.address || (valueBuffer.length === 0 ? 0 // externally allocated buffers of zero-length with the same non-null-pointer can crash node, #161 : (valueArrayBuffer.address = getAddress(valueArrayBuffer)))) + valueBuffer.byteOffset; if (address <= 0 && valueBuffer.length > 0) console.error('Supplied buffer had an invalid address', address); float64[position] = address; if (store.compression) { let compressionFlagIndex = store.compression.startingOffset || 0; // this is the compression indicator, so we must compress mustCompress = compressionFlagIndex < valueBuffer.length && valueBuffer[compressionFlagIndex] >= 250; } } uint32[(position++ << 1) - 1] = valueSize; if ( store.compression && (valueSize >= store.compression.threshold || mustCompress) ) { flags |= 0x100000; float64[position] = store.compression.address; if (!writeTxn) compress(env.address, uint32.address + (position << 3), () => { // this is never actually called in NodeJS, just use to pin the buffer in memory until it is finished // and is a no-op in Deno if (!float64) throw new Error('No float64 available'); }); position++; } } if (ifVersion !== undefined) { if (ifVersion === null) flags |= 0x10; // if it does not exist, MDB_NOOVERWRITE else { flags |= 0x100; float64[position++] = ifVersion; } } if (version !== undefined) { flags |= 0x200; float64[position++] = version || 0; } } else position++; targetBytes.position = position; if (writeTxn) { uint32[0] = flags; write(env.address, uint32.address); return () => uint32[0] & FAILED_CONDITION ? 
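
// --- illustrative usage sketch (not part of the bundled source) ---
// The store.compression.threshold check above decides which values get LZ4
// compressed before being handed to the write thread. The threshold comes from
// the compression option to open(); the numbers here are illustrative:
const { open } = require('lmdb');
const compressedDb = open({
  path: 'compressed-db',
  compression: { threshold: 1000 }, // only compress values of 1000 bytes or more
});
compressedDb.put('doc', { body: 'x'.repeat(5000) }); // large enough to be compressed
// --- end sketch ---
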
SYNC_PROMISE_FAIL : SYNC_PROMISE_SUCCESS; } // if we ever use buffers that haven't been zero'ed, need to clear out the next slot like this: // uint32[position << 1] = 0 // clear out the next slot let nextUint32; if (position > newBufferThreshold) { targetBytes = allocateInstructionBuffer(position); position = targetBytes.position; nextUint32 = targetBytes.uint32; } else nextUint32 = uint32; let resolution = nextResolution; // create the placeholder next resolution nextResolution = resolution.next = { // we try keep resolutions exactly the same object type uint32: nextUint32, flagPosition: position << 1, flag: 0, // TODO: eventually eliminate this, as we can probably signify HAS_TXN/NO_RESOLVE/FAILED_CONDITION in upper bits valueBuffer: fixedBuffer, // these are all just placeholders so that we have the right hidden class initially allocated next: null, meta: null, }; lastQueuedResolution = resolution; let writtenBatchDepth = batchDepth; return (callback) => { if (writtenBatchDepth) { // If we are in a batch, the transaction can't close, so we do the faster, // but non-deterministic updates, knowing that the write thread can // just poll for the status change if we miss a status update. // That is, if we are on x64 architecture... if (arch === 'x64') { writeStatus = uint32[flagPosition]; uint32[flagPosition] = flags; } else { // However, on ARM processors, apparently more radical memory reordering can occur // so we need to use the slower atomic operation to ensure that a memory barrier is set // and that the value pointer is actually written before the flag is updated writeStatus = Atomics.or(uint32, flagPosition, flags); } if (writeBatchStart && !writeStatus) { outstandingBatchCount += 1 + (valueSize >> 12); if (outstandingBatchCount > batchStartThreshold) { outstandingBatchCount = 0; writeBatchStart(); writeBatchStart = null; } } } // otherwise the transaction could end at any time and we need to know the // deterministically if it is ending, so we can reset the commit promise // so we use the slower atomic operation else writeStatus = Atomics.or(uint32, flagPosition, flags); outstandingWriteCount++; if (writeStatus & TXN_DELIMITER) { commitPromise = null; // TODO: Don't reset these if this comes from the batch start operation on an event turn batch flushPromise = null; flushResolvers = []; queueCommitResolution(resolution); if (!startAddress) { startAddress = uint32.address + (flagPosition << 2); } } if (!writtenBatchDepth && batchFlushResolvers.length > 0) { flushResolvers.push(...batchFlushResolvers); batchFlushResolvers = []; } if (!flushPromise && overlappingSync) { flushPromise = new Promise((resolve) => { if (writtenBatchDepth) { batchFlushResolvers.push(resolve); } else { flushResolvers.push(resolve); } }); } if (writeStatus & WAITING_OPERATION) { // write thread is waiting write(env.address, 0); } if (outstandingWriteCount > BACKPRESSURE_THRESHOLD && !writeBatchStart) { if (!backpressureArray) backpressureArray = new Int32Array(new SharedArrayBuffer(4), 0, 1); Atomics.wait( backpressureArray, 0, 0, Math.round(outstandingWriteCount / BACKPRESSURE_THRESHOLD), ); } if (startAddress) { if (eventTurnBatching) startWriting(); // start writing immediately because this has already been batched/queued else if (!enqueuedCommit && txnStartThreshold) { enqueuedCommit = commitDelay == 0 && typeof setImmediate != 'undefined' ? 
setImmediate(() => startWriting()) : setTimeout(() => startWriting(), commitDelay); } else if (outstandingWriteCount > txnStartThreshold) startWriting(); } if ((outstandingWriteCount & 7) === 0) resolveWrites(); if (store.cache) { resolution.meta = { key, store, valueSize: valueBuffer ? valueBuffer.length : 0, }; } resolution.valueBuffer = valueBuffer; if (callback) { if (callback === IF_EXISTS$1) ifVersion = IF_EXISTS$1; else { let meta = resolution.meta || (resolution.meta = {}); meta.reject = callback; meta.resolve = (value) => callback(null, value); return; } } // if it is not conditional because of ifVersion or has any flags that can make the write conditional if (ifVersion === undefined && !(flags & 0x22030)) { if (writtenBatchDepth > 1) { if (!resolution.flag && !store.cache) resolution.flag = NO_RESOLVE; return PROMISE_SUCCESS; // or return undefined? } if (commitPromise) { if (!resolution.flag) resolution.flag = NO_RESOLVE; } else { commitPromise = new Promise((resolve, reject) => { let meta = resolution.meta || (resolution.meta = {}); meta.resolve = resolve; resolve.unconditional = true; meta.reject = reject; }); if (separateFlushed) commitPromise.flushed = overlappingSync ? flushPromise : commitPromise; } return commitPromise; } lastWritePromise = new Promise((resolve, reject) => { let meta = resolution.meta || (resolution.meta = {}); meta.resolve = resolve; meta.reject = reject; }); if (separateFlushed) lastWritePromise.flushed = overlappingSync ? flushPromise : lastWritePromise; return lastWritePromise; }; } Promise.resolve(); function startWriting() { if (enqueuedCommit) { clearImmediate(enqueuedCommit); enqueuedCommit = null; } let resolvers = flushResolvers; env.startWriting(startAddress, (status) => { if (dynamicBytes.uint32[dynamicBytes.position << 1] & TXN_DELIMITER) queueCommitResolution(nextResolution); resolveWrites(true); switch (status) { case 0: for (let resolver of resolvers) { resolver(); } break; case 1: break; case 2: hasUnresolvedTxns = false; executeTxnCallbacks(); return hasUnresolvedTxns; default: try { lmdbError(status); } catch (error) { console.error(error); if (commitRejectPromise) { commitRejectPromise.reject(error); commitRejectPromise = null; } } } }); startAddress = 0; } function queueCommitResolution(resolution) { if (!(resolution.flag & HAS_TXN)) { resolution.flag = HAS_TXN; if (txnResolution) { txnResolution.nextTxn = resolution; //outstandingWriteCount = 0 } else txnResolution = resolution; } } var TXN_DONE = TXN_COMMITTED | TXN_FAILED; function resolveWrites(async) { // clean up finished instructions let instructionStatus; while ( (instructionStatus = unwrittenResolution.uint32[unwrittenResolution.flagPosition]) & 0x1000000 ) { if (unwrittenResolution.callbacks) { nextTxnCallbacks.push(unwrittenResolution.callbacks); unwrittenResolution.callbacks = null; } outstandingWriteCount--; if (unwrittenResolution.flag !== HAS_TXN) { if ( unwrittenResolution.flag === NO_RESOLVE && !unwrittenResolution.meta ) { // in this case we can completely remove from the linked list, clearing more memory lastPromisedResolution.next = unwrittenResolution = unwrittenResolution.next; continue; } unwrittenResolution.uint32 = null; } unwrittenResolution.valueBuffer = null; unwrittenResolution.flag = instructionStatus; lastPromisedResolution = unwrittenResolution; unwrittenResolution = unwrittenResolution.next; } while ( txnResolution && (instructionStatus = txnResolution.uint32[txnResolution.flagPosition] & TXN_DONE) ) { if (instructionStatus & TXN_FAILED) 
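
// --- illustrative usage sketch (not part of the bundled source) ---
// The commitPromise/flushPromise bookkeeping above means that, with the default
// eventTurnBatching, every put()/remove() issued in the same event turn is queued
// into one write transaction and their promises resolve together. Sketch with
// made-up keys:
const { open } = require('lmdb');
const batchedDb = open({ path: 'batched-db' });

async function writeBatch() {
  const first = batchedDb.put('k1', 'v1');  // queued, not yet committed
  const second = batchedDb.put('k2', 'v2'); // same transaction as the write above
  const results = await Promise.all([first, second]);
  console.log(results); // [true, true] once the shared transaction commits
}
writeBatch();
// --- end sketch ---
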
rejectCommit(); else resolveCommit(async); } } function resolveCommit(async) { afterCommit(txnResolution.uint32[txnResolution.flagPosition - 1]); if (async) resetReadTxn(); else queueMicrotask(resetReadTxn); // TODO: only do this if there are actually committed writes? do { if (uncommittedResolution.meta && uncommittedResolution.meta.resolve) { let resolve = uncommittedResolution.meta.resolve; if ( uncommittedResolution.flag & FAILED_CONDITION && !resolve.unconditional ) resolve(false); else resolve(true); } } while ( (uncommittedResolution = uncommittedResolution.next) && uncommittedResolution != txnResolution ); txnResolution = txnResolution.nextTxn; } var commitRejectPromise; function rejectCommit() { afterCommit(); if (!commitRejectPromise) { let rejectFunction; commitRejectPromise = new Promise( (resolve, reject) => (rejectFunction = reject), ); commitRejectPromise.reject = rejectFunction; } do { if (uncommittedResolution.meta && uncommittedResolution.meta.reject) { uncommittedResolution.flag & 0xf; let error = new Error('Commit failed (see commitError for details)'); error.commitError = commitRejectPromise; uncommittedResolution.meta.reject(error); } } while ( (uncommittedResolution = uncommittedResolution.next) && uncommittedResolution != txnResolution ); txnResolution = txnResolution.nextTxn; } function atomicStatus(uint32, flagPosition, newStatus) { if (batchDepth) { // if we are in a batch, the transaction can't close, so we do the faster, // but non-deterministic updates, knowing that the write thread can // just poll for the status change if we miss a status update let writeStatus = uint32[flagPosition]; uint32[flagPosition] = newStatus; return writeStatus; //return Atomics.or(uint32, flagPosition, newStatus) } // otherwise the transaction could end at any time and we need to know the // deterministically if it is ending, so we can reset the commit promise // so we use the slower atomic operation else try { return Atomics.or(uint32, flagPosition, newStatus); } catch (error) { console.error(error); return; } } function afterCommit(txnId) { for (let i = 0, l = afterCommitCallbacks.length; i < l; i++) { try { afterCommitCallbacks[i]({ next: uncommittedResolution, last: txnResolution, txnId, }); } catch (error) { console.error('In aftercommit callback', error); } } } async function executeTxnCallbacks() { env.writeTxn = writeTxn = { write: true }; nextTxnCallbacks.isExecuting = true; for (let i = 0; i < nextTxnCallbacks.length; i++) { let txnCallbacks = nextTxnCallbacks[i]; for (let j = 0, l = txnCallbacks.length; j < l; j++) { let userTxnCallback = txnCallbacks[j]; let asChild = userTxnCallback.asChild; if (asChild) { env.beginTxn(1); // abortable let parentTxn = writeTxn; env.writeTxn = writeTxn = { write: true }; try { let result = userTxnCallback.callback(); if (result && result.then) { hasUnresolvedTxns = true; await result; } if (result === ABORT) env.abortTxn(); else env.commitTxn(); clearWriteTxn(parentTxn); txnCallbacks[j] = result; } catch (error) { clearWriteTxn(parentTxn); env.abortTxn(); txnError(error, txnCallbacks, j); } } else { try { let result = userTxnCallback(); txnCallbacks[j] = result; if (result && result.then) { hasUnresolvedTxns = true; await result; } } catch (error) { txnError(error, txnCallbacks, j); } } } } nextTxnCallbacks = []; clearWriteTxn(null); if (hasUnresolvedTxns) { env.resumeWriting(); } function txnError(error, txnCallbacks, i) { (txnCallbacks.errors || (txnCallbacks.errors = []))[i] = error; txnCallbacks[i] = CALLBACK_THREW; } } function 
finishBatch() { let bytes = dynamicBytes; let uint32 = bytes.uint32; let nextPosition = bytes.position + 1; let writeStatus; if (nextPosition > newBufferThreshold) { allocateInstructionBuffer(nextPosition); nextResolution.flagPosition = dynamicBytes.position << 1; nextResolution.uint32 = dynamicBytes.uint32; writeStatus = atomicStatus(uint32, bytes.position << 1, 2); // atomically write the end block } else { uint32[nextPosition << 1] = 0; // clear out the next slot writeStatus = atomicStatus(uint32, bytes.position++ << 1, 2); // atomically write the end block nextResolution.flagPosition += 2; } if (writeStatus & WAITING_OPERATION) { write(env.address, 0); } } function clearWriteTxn(parentTxn) { // TODO: We might actually want to track cursors in a write txn and manually // close them. if (writeTxn && writeTxn.refCount > 0) writeTxn.isDone = true; env.writeTxn = writeTxn = parentTxn || null; } Object.assign(LMDBStore.prototype, { put(key, value, versionOrOptions, ifVersion) { let callback, flags = 15, type = typeof versionOrOptions; if (type == 'object' && versionOrOptions) { if (versionOrOptions.noOverwrite) flags |= 0x10; if (versionOrOptions.noDupData) flags |= 0x20; if (versionOrOptions.instructedWrite) flags |= 0x2000; if (versionOrOptions.append) flags |= 0x20000; if (versionOrOptions.ifVersion != undefined) ifVersion = versionOrOptions.ifVersion; versionOrOptions = versionOrOptions.version; if (typeof ifVersion == 'function') callback = ifVersion; } else if (type == 'function') { callback = versionOrOptions; } return writeInstructions( flags, this, key, value, this.useVersions ? versionOrOptions || 0 : undefined, ifVersion, )(callback); }, remove(key, ifVersionOrValue, callback) { let flags = 13; let ifVersion, value; if (ifVersionOrValue !== undefined) { if (typeof ifVersionOrValue == 'function') callback = ifVersionOrValue; else if (ifVersionOrValue === IF_EXISTS$1 && !callback) // we have a handler for IF_EXISTS in the callback handler for remove callback = ifVersionOrValue; else if (this.useVersions) ifVersion = ifVersionOrValue; else { flags = 14; value = ifVersionOrValue; } } return writeInstructions( flags, this, key, value, undefined, ifVersion, )(callback); }, del(key, options, callback) { return this.remove(key, options, callback); }, ifNoExists(key, callback) { return this.ifVersion(key, null, callback); }, ifVersion(key, version, callback, options) { if (!callback) { return new Batch((operations, callback) => { let promise = this.ifVersion(key, version, operations, options); if (callback) promise.then(callback); return promise; }); } if (writeTxn) { if (version === undefined || this.doesExist(key, version)) { callback(); return SYNC_PROMISE_SUCCESS; } return SYNC_PROMISE_FAIL; } let flags = key === undefined || version === undefined ? 
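
// --- illustrative usage sketch (not part of the bundled source) ---
// put(), remove(), ifNoExists() and ifVersion() defined here make up the
// documented conditional-write API. A sketch assuming useVersions is enabled;
// keys and version numbers are made up:
const { open } = require('lmdb');
const versionedDb = open({ path: 'versioned-db', useVersions: true });

async function conditionalWrites() {
  await versionedDb.put('user:1', { name: 'Ada' }, 1); // store with version 1

  // the callback's writes only apply if the current version is still 1
  const updated = await versionedDb.ifVersion('user:1', 1, () => {
    versionedDb.put('user:1', { name: 'Ada L.' }, 2);
  });

  // the callback's writes only apply if the key does not exist yet
  const created = await versionedDb.ifNoExists('user:2', () => {
    versionedDb.put('user:2', { name: 'Grace' }, 1);
  });

  console.log(updated, created);         // each resolves to a boolean
  await versionedDb.remove('user:2', 1); // remove only if the version is still 1
}
conditionalWrites();
// --- end sketch ---
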
1 : 4; if (options?.ifLessThan) flags |= CONDITIONAL_VERSION_LESS_THAN; if (options?.allowNotFound) flags |= CONDITIONAL_ALLOW_NOTFOUND; let finishStartWrite = writeInstructions( flags, this, key, undefined, undefined, version, ); let promise; batchDepth += 2; if (batchDepth > 2) promise = finishStartWrite(); else { writeBatchStart = () => { promise = finishStartWrite(); }; outstandingBatchCount = 0; } try { if (typeof callback === 'function') { callback(); } else { for (let i = 0, l = callback.length; i < l; i++) { let operation = callback[i]; this[operation.type](operation.key, operation.value); } } } finally { if (!promise) { finishBatch(); batchDepth -= 2; promise = finishStartWrite(); // finish write once all the operations have been written (and it hasn't been written prematurely) writeBatchStart = null; } else { batchDepth -= 2; finishBatch(); } } return promise; }, batch(callbackOrOperations) { return this.ifVersion(undefined, undefined, callbackOrOperations); }, drop(callback) { return writeInstructions( 1024 + 12, this, Buffer.from([]), undefined, undefined, undefined, )(callback); }, clearAsync(callback) { if (this.encoder) { if (this.encoder.clearSharedData) this.encoder.clearSharedData(); else if (this.encoder.structures) this.encoder.structures = []; } return writeInstructions( 12, this, Buffer.from([]), undefined, undefined, undefined, )(callback); }, _triggerError() { finishBatch(); }, putSync(key, value, versionOrOptions, ifVersion) { if (writeTxn) return ( this.put(key, value, versionOrOptions, ifVersion) === SYNC_PROMISE_SUCCESS ); else return this.transactionSync( () => this.put(key, value, versionOrOptions, ifVersion) === SYNC_PROMISE_SUCCESS, overlappingSync ? 0x10002 : 2, ); // non-abortable, async flush }, removeSync(key, ifVersionOrValue) { if (writeTxn) return this.remove(key, ifVersionOrValue) === SYNC_PROMISE_SUCCESS; else return this.transactionSync( () => this.remove(key, ifVersionOrValue) === SYNC_PROMISE_SUCCESS, overlappingSync ? 0x10002 : 2, ); // non-abortable, async flush }, transaction(callback) { if (writeTxn && !nextTxnCallbacks.isExecuting) { // already nested in a transaction, just execute and return return callback(); } return this.transactionAsync(callback); }, childTransaction(callback) { if (useWritemap) throw new Error( 'Child transactions are not supported in writemap mode', ); if (writeTxn) { let parentTxn = writeTxn; let thisTxn = (env.writeTxn = writeTxn = { write: true }); env.beginTxn(1); // abortable let callbackDone, finishTxn; try { return (writeTxn.childResults = when( callback(), (finishTxn = (result) => { if (writeTxn !== thisTxn) // need to wait for child txn to finish asynchronously return writeTxn.childResults.then(() => finishTxn(result)); callbackDone = true; if (result === ABORT) env.abortTxn(); else env.commitTxn(); clearWriteTxn(parentTxn); return result; }), (error) => { env.abortTxn(); clearWriteTxn(parentTxn); throw error; }, )); } catch (error) { if (!callbackDone) env.abortTxn(); clearWriteTxn(parentTxn); throw error; } } return this.transactionAsync(callback, true); }, transactionAsync(callback, asChild) { let txnIndex; let txnCallbacks; if (lastQueuedResolution.callbacks) { txnCallbacks = lastQueuedResolution.callbacks; txnIndex = txnCallbacks.push(asChild ? { callback, asChild } : callback) - 1; } else if (nextTxnCallbacks.isExecuting) { txnCallbacks = [asChild ? 
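
// --- illustrative usage sketch (not part of the bundled source) ---
// transaction(), childTransaction() and transactionAsync() above run a callback
// inside a single write transaction; a child transaction can return the exported
// ABORT value to roll itself back. Sketch with made-up keys and balances:
const { open, ABORT } = require('lmdb');
const txnDb = open({ path: 'txn-db' });

function transfer(from, to, amount) {
  // reads inside the callback see the uncommitted writes of the same transaction
  return txnDb.childTransaction(() => {
    const balance = txnDb.get(from) || 0;
    if (balance < amount) return ABORT; // rolls back, nothing is written
    txnDb.put(from, balance - amount);
    txnDb.put(to, (txnDb.get(to) || 0) + amount);
  });
}
transfer('acct:1', 'acct:2', 10);
// --- end sketch ---
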
{ callback, asChild } : callback]; txnCallbacks.results = commitPromise; nextTxnCallbacks.push(txnCallbacks); txnIndex = 0; } else { if (writeTxn) throw new Error('Can not enqueue transaction during write txn'); let finishWrite = writeInstructions( 8 | (this.strictAsyncOrder ? 0x100000 : 0), this, ); txnCallbacks = [asChild ? { callback, asChild } : callback]; lastQueuedResolution.callbacks = txnCallbacks; lastQueuedResolution.id = Math.random(); txnCallbacks.results = finishWrite(); txnIndex = 0; } return txnCallbacks.results.then((results) => { let result = txnCallbacks[txnIndex]; if (result === CALLBACK_THREW) throw txnCallbacks.errors[txnIndex]; return result; }); }, transactionSync(callback, flags) { if (writeTxn) { if (!useWritemap && (flags == undefined || flags & 1)) // can't use child transactions in write maps // already nested in a transaction, execute as child transaction (if possible) and return return this.childTransaction(callback); let result = callback(); // else just run in current transaction if (result == ABORT && !abortedNonChildTransactionWarn) { console.warn( 'Can not abort a transaction inside another transaction with ' + (this.cache ? 'caching enabled' : 'useWritemap enabled'), ); abortedNonChildTransactionWarn = true; } return result; } let callbackDone, finishTxn; this.transactions++; if (!env.address) throw new Error( 'The database has been closed and you can not transact on it', ); env.beginTxn(flags == undefined ? 3 : flags); let thisTxn = (writeTxn = env.writeTxn = { write: true }); try { this.emit('begin-transaction'); return (writeTxn.childResults = when( callback(), (finishTxn = (result) => { if (writeTxn !== thisTxn) // need to wait for child txn to finish asynchronously return writeTxn.childResults.then(() => finishTxn(result)); try { callbackDone = true; if (result === ABORT) env.abortTxn(); else { env.commitTxn(); resetReadTxn(); } return result; } finally { clearWriteTxn(null); } }), (error) => { try { env.abortTxn(); } catch (e) {} clearWriteTxn(null); throw error; }, )); } catch (error) { if (!callbackDone) try { env.abortTxn(); } catch (e) {} clearWriteTxn(null); throw error; } }, getWriteTxnId() { return env.getWriteTxnId(); }, transactionSyncStart(callback) { return this.transactionSync(callback, 0); }, // make the db a thenable/promise-like for when the last commit is committed committed: (committed = { then(onfulfilled, onrejected) { if (commitPromise) return commitPromise.then(onfulfilled, onrejected); if (lastWritePromise) // always resolve to true return lastWritePromise.then(() => onfulfilled(true), onrejected); return SYNC_PROMISE_SUCCESS.then(onfulfilled, onrejected); }, }), flushed: { // make this a thenable for when the commit is flushed to disk then(onfulfilled, onrejected) { if (flushPromise) flushPromise.hasCallbacks = true; return Promise.all([flushPromise || committed, lastSyncTxnFlush]).then( onfulfilled, onrejected, ); }, }, _endWrites(resolvedPromise, resolvedSyncPromise) { this.put = this.remove = this.del = this.batch = this.removeSync = this.putSync = this.transactionAsync = this.drop = this.clearAsync = () => { throw new Error('Database is closed'); }; // wait for all txns to finish, checking again after the current txn is done let finalPromise = flushPromise || commitPromise || lastWritePromise; if (flushPromise) flushPromise.hasCallbacks = true; let finalSyncPromise = lastSyncTxnFlush; if ( (finalPromise && resolvedPromise != finalPromise) || (finalSyncPromise ) ) { return Promise.all([finalPromise, 
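
// --- illustrative usage sketch (not part of the bundled source) ---
// The committed and flushed thenables defined here let a caller wait for the last
// queued write to be committed, or fully synced to disk, without tracking each
// put() promise individually; putSync()/transactionSync() commit before returning.
// Keys and values are made up:
const { open } = require('lmdb');
const logDb = open({ path: 'log-db' });

logDb.put('entry:1', { msg: 'started' });
logDb.put('entry:2', { msg: 'working' });
logDb.putSync('entry:3', { msg: 'committed synchronously' });

async function waitForDurability() {
  await logDb.committed; // all writes above are committed and visible to readers
  await logDb.flushed;   // and have been flushed/synced to disk
}
waitForDurability();
// --- end sketch ---
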
finalSyncPromise]).then( () => this._endWrites(finalPromise, finalSyncPromise), () => this._endWrites(finalPromise, finalSyncPromise), ); } Object.defineProperty(env, 'sync', { value: null }); }, on(event, callback) { if (event == 'beforecommit') { eventTurnBatching = true; beforeCommitCallbacks.push(callback); } else if (event == 'aftercommit') afterCommitCallbacks.push(callback); else if (event == 'committed') { this.getUserSharedBuffer('__committed__', new ArrayBuffer(0), { envKey: true, callback, }); } else super.on(event, callback); }, }); } class Batch extends Array { constructor(callback) { super(); this.callback = callback; } put(key, value) { this.push({ type: 'put', key, value }); } del(key) { this.push({ type: 'del', key }); } clear() { this.length = 0; } write(callback) { return this.callback(this, callback); } } function asBinary(buffer) { return { ['\x10binary-data\x02']: buffer, }; } const REUSE_BUFFER_MODE = 512; const writeUint32Key = (key, target, start) => { (target.dataView || (target.dataView = new DataView(target.buffer, 0, target.length))).setUint32(start, key, isLittleEndian); return start + 4; }; const readUint32Key = (target, start) => { return (target.dataView || (target.dataView = new DataView(target.buffer, 0, target.length))).getUint32(start, isLittleEndian); }; const writeBufferKey = (key, target, start) => { target.set(key, start); return key.length + start; }; const Uint8ArraySlice$1 = Uint8Array.prototype.slice; const readBufferKey = (target, start, end) => { return Uint8ArraySlice$1.call(target, start, end); }; let lastEncodedValue, bytes; function applyKeyHandling(store) { if (store.encoding == 'ordered-binary') { store.encoder = store.decoder = { writeKey: orderedBinary.writeKey, readKey: orderedBinary.readKey, }; } if (store.encoder && store.encoder.writeKey && !store.encoder.encode) { store.encoder.encode = function(value, mode) { if (typeof value !== 'object' && value && value === lastEncodedValue) ; else { lastEncodedValue = value; bytes = saveKey(value, this.writeKey, false, store.maxKeySize); } if (bytes.end > 0 && !(REUSE_BUFFER_MODE & mode)) { return bytes.subarray(bytes.start, bytes.end); } return bytes; }; store.encoder.copyBuffers = true; // just an indicator for the buffer reuse in write.js } if (store.decoder && store.decoder.readKey && !store.decoder.decode) { store.decoder.decode = function(buffer) { return this.readKey(buffer, 0, buffer.length); }; store.decoderCopies = true; } if (store.keyIsUint32 || store.keyEncoding == 'uint32') { store.writeKey = writeUint32Key; store.readKey = readUint32Key; } else if (store.keyIsBuffer || store.keyEncoding == 'binary') { store.writeKey = writeBufferKey; store.readKey = readBufferKey; } else if (store.keyEncoder) { store.writeKey = store.keyEncoder.writeKey; store.readKey = store.keyEncoder.readKey; } else { store.writeKey = orderedBinary.writeKey; store.readKey = orderedBinary.readKey; } } let saveBuffer, saveDataView = { setFloat64() {}, setUint32() {} }, saveDataAddress; let savePosition$1 = 8000; let DYNAMIC_KEY_BUFFER_SIZE$1 = 8192; function allocateSaveBuffer() { saveBuffer = typeof Buffer != 'undefined' ? Buffer.alloc(DYNAMIC_KEY_BUFFER_SIZE$1) : new Uint8Array(DYNAMIC_KEY_BUFFER_SIZE$1); saveBuffer.buffer.address = getAddress(saveBuffer.buffer); saveDataAddress = saveBuffer.buffer.address; // TODO: Conditionally only do this for key sequences? 
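
// --- illustrative usage sketch (not part of the bundled source) ---
// writeUint32Key/readUint32Key, writeBufferKey/readBufferKey and applyKeyHandling
// above back the documented keyEncoding option, and asBinary() wraps an already
// encoded buffer so it is stored without being re-encoded. Paths and keys are
// illustrative:
const { open, asBinary } = require('lmdb');

// keys are plain 32-bit unsigned integers
const countersDb = open({ path: 'counters-db', keyEncoding: 'uint32' });
countersDb.putSync(42, { hits: 1 });

// copy a record without decoding and re-encoding its value
const rawBytes = countersDb.getBinary(42);
countersDb.putSync(43, asBinary(rawBytes));
// --- end sketch ---
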
saveDataView.setUint32(savePosition$1, 0xffffffff); saveDataView.setFloat64(savePosition$1 + 4, saveDataAddress, isLittleEndian); // save a pointer from the old buffer to the new address for the sake of the prefetch sequences saveDataView = saveBuffer.dataView || (saveBuffer.dataView = new DataView(saveBuffer.buffer, saveBuffer.byteOffset, saveBuffer.byteLength)); savePosition$1 = 0; } function saveKey(key, writeKey, saveTo, maxKeySize, flags) { if (savePosition$1 > 7800) { allocateSaveBuffer(); } let start = savePosition$1; try { savePosition$1 = key === undefined ? start + 4 : writeKey(key, saveBuffer, start + 4); } catch (error) { saveBuffer.fill(0, start + 4); // restore zeros if (error.name == 'RangeError') { if (8180 - start < maxKeySize) { allocateSaveBuffer(); // try again: return saveKey(key, writeKey, saveTo, maxKeySize); } throw new Error('Key was too large, max key size is ' + maxKeySize); } else throw error; } let length = savePosition$1 - start - 4; if (length > maxKeySize) { throw new Error('Key of size ' + length + ' was too large, max key size is ' + maxKeySize); } if (savePosition$1 >= 8160) { // need to reserve enough room at the end for pointers savePosition$1 = start; // reset position allocateSaveBuffer(); // try again: return saveKey(key, writeKey, saveTo, maxKeySize); } if (saveTo) { saveDataView.setUint32(start, flags ? length | flags : length, isLittleEndian); // save the length saveTo.saveBuffer = saveBuffer; savePosition$1 = (savePosition$1 + 12) & 0xfffffc; return start + saveDataAddress; } else { saveBuffer.start = start + 4; saveBuffer.end = savePosition$1; savePosition$1 = (savePosition$1 + 7) & 0xfffff8; // full 64-bit word alignment since these are usually copied return saveBuffer; } } const IF_EXISTS = 3.542694326329068e-103; const ITERATOR_DONE = { done: true, value: undefined }; const Uint8ArraySlice = Uint8Array.prototype.slice; let getValueBytes = globalBuffer; if (!getValueBytes.maxLength) { getValueBytes.maxLength = getValueBytes.length; getValueBytes.isGlobal = true; Object.defineProperty(getValueBytes, 'length', { value: getValueBytes.length, writable: true, configurable: true, }); } const START_ADDRESS_POSITION = 4064; const NEW_BUFFER_THRESHOLD = 0x8000; const SOURCE_SYMBOL = Symbol.for('source'); const UNMODIFIED = {}; let mmaps = []; function addReadMethods( LMDBStore, { maxKeySize, env, keyBytes, keyBytesView, getLastVersion, getLastTxnId }, ) { let readTxn, readTxnRenewed, asSafeBuffer = false; let renewId = 1; let outstandingReads = 0; Object.assign(LMDBStore.prototype, { getString(id, options) { let txn = env.writeTxn || (options && options.transaction) || (readTxnRenewed ? readTxn : renewReadTxn(this)); let string = getStringByBinary( this.dbAddress, this.writeKey(id, keyBytes, 0), txn.address || 0, ); if (typeof string === 'number') { // indicates the buffer wasn't large enough this._allocateGetBuffer(string); // and then try again string = getStringByBinary( this.dbAddress, this.writeKey(id, keyBytes, 0), txn.address || 0, ); } if (string) this.lastSize = string.length; return string; }, getBinaryFast(id, options) { let rc; let txn = env.writeTxn || (options && options.transaction) || (readTxnRenewed ? 
readTxn : renewReadTxn(this)); rc = this.lastSize = getByBinary( this.dbAddress, this.writeKey(id, keyBytes, 0), (options && options.ifNotTxnId) || 0, txn.address || 0, ); if (rc < 0) { if (rc == -30798) // MDB_NOTFOUND return; // undefined if (rc == -30004) // txn id matched return UNMODIFIED; if ( rc == -30781 /*MDB_BAD_VALSIZE*/ && this.writeKey(id, keyBytes, 0) == 0 ) throw new Error( id === undefined ? 'A key is required for get, but is undefined' : 'Zero length key is not allowed in LMDB', ); if (rc == -30000) // int32 overflow, read uint32 rc = this.lastSize = keyBytesView.getUint32(0, isLittleEndian); else if (rc == -30001) { // shared buffer this.lastSize = keyBytesView.getUint32(0, isLittleEndian); let bufferId = keyBytesView.getUint32(4, isLittleEndian); let bytes = getMMapBuffer(bufferId, this.lastSize); return asSafeBuffer ? Buffer.from(bytes) : bytes; } else throw lmdbError(rc); } let compression = this.compression; let bytes = compression ? compression.getValueBytes : getValueBytes; if (rc > bytes.maxLength) { // this means the target buffer wasn't big enough, so the get failed to copy all the data from the database, need to either grow or use special buffer return this._returnLargeBuffer(() => getByBinary( this.dbAddress, this.writeKey(id, keyBytes, 0), 0, txn.address || 0, ), ); } bytes.length = this.lastSize; return bytes; }, getBFAsync(id, options, callback) { let txn = env.writeTxn || (options && options.transaction) || (readTxnRenewed ? readTxn : renewReadTxn(this)); txn.refCount = (txn.refCount || 0) + 1; outstandingReads++; if (!txn.address) { throw new Error('Invalid transaction, it has no address'); } let address = recordReadInstruction( txn.address, this.db.dbi, id, this.writeKey, maxKeySize, (rc, bufferId, offset, size) => { if (rc && rc !== 1) callback(lmdbError(rc)); outstandingReads--; let buffer = mmaps[bufferId]; if (!buffer) { buffer = mmaps[bufferId] = getSharedBuffer(bufferId, env.address); } //console.log({bufferId, offset, size}) if (buffer.isSharedMap) { // using LMDB shared memory // TODO: We may want explicit support for clearing aborting the transaction on the next event turn, // but for now we are relying on the GC to cleanup transaction for larger blocks of memory let bytes = new Uint8Array(buffer, offset, size); bytes.txn = txn; callback(bytes, 0, size); } else { // using copied memory txn.done(); // decrement and possibly abort callback(buffer, offset, size); } }, ); if (address) { startRead(address, () => { resolveReads(); }); } }, getAsync(id, options, callback) { let promise; if (!callback) promise = new Promise((resolve) => (callback = resolve)); this.getBFAsync(id, options, (buffer, offset, size) => { if (this.useVersions) { // TODO: And get the version offset += 8; size -= 8; } let bytes = new Uint8Array(buffer, offset, size); let value; if (this.decoder) { // the decoder potentially uses the data from the buffer in the future and needs a stable buffer value = bytes && this.decoder.decode(bytes); } else if (this.encoding == 'binary') { value = bytes; } else { value = Buffer.prototype.utf8Slice.call(bytes, 0, size); if (this.encoding == 'json' && value) value = JSON.parse(value); } callback(value); }); return promise; }, retain(data, options) { if (!data) return; let source = data[SOURCE_SYMBOL]; let buffer = source ? source.bytes : data; if (!buffer.isGlobal && !env.writeTxn) { let txn = options?.transaction || (readTxnRenewed ? readTxn : renewReadTxn(this)); buffer.txn
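
// --- illustrative usage sketch (not part of the bundled source) ---
// getBinaryFast() above returns a temporary buffer that is reused on the next get,
// while getBinary() returns a stable copy and the documented get() returns the
// decoded value. Sketch with a made-up key:
const { open } = require('lmdb');
const readDb = open({ path: 'read-db' });
readDb.putSync('greeting', { someValue: 'Hello, World!' });

const decoded = readDb.get('greeting');             // decoded JS value
const copied = readDb.getBinary('greeting');        // Buffer copy of the stored bytes
const transient = readDb.getBinaryFast('greeting'); // reused buffer, copy it before the next get
console.log(decoded.someValue, copied.length, transient.length);
// --- end sketch ---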