@electric-sql/pglite
PGlite is a WASM Postgres build packaged into a TypeScript client library that enables you to run Postgres in the browser, Node.js, and Bun, with no need to install any other dependencies. It is only 3.7 MB gzipped.
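For example, a minimal in-memory instance can be constructed and queried directly. This is a sketch based on the package's documented API; the column alias and logged field are illustrative:

```typescript
import { PGlite } from '@electric-sql/pglite'

// With no dataDir argument the database lives entirely in memory
// (the memoryfs backend); nothing is persisted between runs.
const db = new PGlite()

// query() parses the wire-protocol response into JS objects.
const result = await db.query<{ message: string }>(
  "SELECT 'Hello from in-browser Postgres' AS message",
)
console.log(result.rows[0].message)
```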
Source Map (JSON)
{"version":3,"sources":["../../../node_modules/.pnpm/tsup@8.3.0_@microsoft+api-extractor@7.47.7_@types+node@20.16.11__postcss@8.4.47_tsx@4.19.1_typescript@5.6.3/node_modules/tsup/assets/cjs_shims.js","../../../node_modules/.pnpm/tinytar@0.1.0/node_modules/tinytar/lib/utils.js","../../../node_modules/.pnpm/tinytar@0.1.0/node_modules/tinytar/lib/constants.js","../../../node_modules/.pnpm/tinytar@0.1.0/node_modules/tinytar/lib/types.js","../../../node_modules/.pnpm/tinytar@0.1.0/node_modules/tinytar/lib/tar.js","../../../node_modules/.pnpm/tinytar@0.1.0/node_modules/tinytar/lib/untar.js","../../../node_modules/.pnpm/tinytar@0.1.0/node_modules/tinytar/index.js","../src/fs/tarUtils.ts","../src/fs/base.ts","../src/fs/nodefs.ts","../src/fs/opfs-ahp.ts","../src/index.ts","../src/pglite.ts","../../../node_modules/.pnpm/async-mutex@0.4.1/node_modules/async-mutex/index.mjs","../src/base.ts","../src/templating.ts","../src/parse.ts","../src/types.ts","../../pg-protocol/src/index.ts","../../pg-protocol/src/serializer.ts","../../pg-protocol/src/buffer-writer.ts","../../pg-protocol/src/string-utils.ts","../../pg-protocol/src/parser.ts","../../pg-protocol/src/messages.ts","../../pg-protocol/src/types.ts","../../pg-protocol/src/buffer-reader.ts","../src/extensionUtils.ts","../src/utils.ts","../src/fs/index.ts","../src/fs/idbfs.ts","../src/fs/memoryfs.ts","../src/postgresMod.ts","../release/postgres.js","../src/interface.ts"],"sourcesContent":["// Shim globals in cjs bundle\n// There's a weird bug that esbuild will always inject importMetaUrl\n// if we export it as `const importMetaUrl = ... __filename ...`\n// But using a function will not cause this issue\n\nconst getImportMetaUrl = () =>\n typeof document === 'undefined'\n ? new URL(`file:${__filename}`).href\n : (document.currentScript && document.currentScript.src) ||\n new URL('main.js', document.baseURI).href\n\nexport const importMetaUrl = /* @__PURE__ */ getImportMetaUrl()\n","'use strict';\n\nvar MAX_SAFE_INTEGER = 9007199254740991;\n\nvar undefined = (function(undefined) {\n return undefined;\n})();\n\nfunction isUndefined(value) {\n return value === undefined;\n}\n\nfunction isString(value) {\n return (typeof value == 'string') ||\n (Object.prototype.toString.call(value) == '[object String]');\n}\n\nfunction isDateTime(value) {\n return (Object.prototype.toString.call(value) == '[object Date]');\n}\n\nfunction isObject(value) {\n return (value !== null) && (typeof value == 'object');\n}\n\nfunction isFunction(value) {\n return typeof value == 'function';\n}\n\nfunction isLength(value) {\n return (typeof value == 'number') &&\n (value > -1) && (value % 1 == 0) &&\n (value <= MAX_SAFE_INTEGER);\n}\n\nfunction isArray(value) {\n return Object.prototype.toString.call(value) == '[object Array]';\n}\n\nfunction isArrayLike(value) {\n return isObject(value) && !isFunction(value) && isLength(value.length);\n}\n\nfunction isArrayBuffer(value) {\n return Object.prototype.toString.call(value) == '[object ArrayBuffer]';\n}\n\nfunction map(array, iteratee) {\n return Array.prototype.map.call(array, iteratee);\n}\n\nfunction find(array, iteratee) {\n var result = undefined;\n\n if (isFunction(iteratee)) {\n Array.prototype.every.call(array, function(item, index, array) {\n var found = iteratee(item, index, array);\n if (found) {\n result = item;\n }\n return !found; // continue if not found\n });\n }\n\n return result;\n}\n\nfunction extend(target /* ...sources */) {\n return Object.assign.apply(null, arguments);\n}\n\nfunction toUint8Array(value) {\n var i;\n 
var length;\n var result;\n\n if (isString(value)) {\n length = value.length;\n result = new Uint8Array(length);\n for (i = 0; i < length; i++) {\n result[i] = value.charCodeAt(i) & 0xFF;\n }\n return result;\n }\n\n if (isArrayBuffer(value)) {\n return new Uint8Array(value);\n }\n\n if (isObject(value) && isArrayBuffer(value.buffer)) {\n return new Uint8Array(value.buffer);\n }\n\n if (isArrayLike(value)) {\n return new Uint8Array(value);\n }\n\n if (isObject(value) && isFunction(value.toString)) {\n return toUint8Array(value.toString());\n }\n\n return new Uint8Array();\n}\n\nmodule.exports.MAX_SAFE_INTEGER = MAX_SAFE_INTEGER;\n\nmodule.exports.isUndefined = isUndefined;\nmodule.exports.isString = isString;\nmodule.exports.isObject = isObject;\nmodule.exports.isDateTime = isDateTime;\nmodule.exports.isFunction = isFunction;\nmodule.exports.isArray = isArray;\nmodule.exports.isArrayLike = isArrayLike;\nmodule.exports.isArrayBuffer = isArrayBuffer;\nmodule.exports.map = map;\nmodule.exports.find = find;\nmodule.exports.extend = extend;\nmodule.exports.toUint8Array = toUint8Array;\n","'use strict';\n\nvar NULL_CHAR = '\\u0000';\n\nmodule.exports = {\n /* eslint-disable key-spacing */\n\n NULL_CHAR: NULL_CHAR,\n\n TMAGIC: 'ustar' + NULL_CHAR + '00', // 'ustar', NULL, '00'\n OLDGNU_MAGIC: 'ustar ' + NULL_CHAR, // 'ustar ', NULL\n\n // Values used in typeflag field.\n REGTYPE: 0, // regular file\n LNKTYPE: 1, // link\n SYMTYPE: 2, // reserved\n CHRTYPE: 3, // character special\n BLKTYPE: 4, // block special\n DIRTYPE: 5, // directory\n FIFOTYPE: 6, // FIFO special\n CONTTYPE: 7, // reserved\n\n // Bits used in the mode field, values in octal.\n TSUID: parseInt('4000', 8), // set UID on execution\n TSGID: parseInt('2000', 8), // set GID on execution\n TSVTX: parseInt('1000', 8), // reserved\n\n // file permissions\n TUREAD: parseInt('0400', 8), // read by owner\n TUWRITE: parseInt('0200', 8), // write by owner\n TUEXEC: parseInt('0100', 8), // execute/search by owner\n TGREAD: parseInt('0040', 8), // read by group\n TGWRITE: parseInt('0020', 8), // write by group\n TGEXEC: parseInt('0010', 8), // execute/search by group\n TOREAD: parseInt('0004', 8), // read by other\n TOWRITE: parseInt('0002', 8), // write by other\n TOEXEC: parseInt('0001', 8), // execute/search by other\n\n TPERMALL: parseInt('0777', 8), // rwxrwxrwx\n TPERMMASK: parseInt('0777', 8) // permissions bitmask\n\n /* eslint-enable key-spacing */\n};\n","'use strict';\n\nvar utils = require('./utils');\nvar constants = require('./constants');\n\nvar recordSize = 512;\nvar defaultFileMode = constants.TPERMALL; // rwxrwxrwx\nvar defaultUid = 0; // root\nvar defaultGid = 0; // root\n\n/*\n struct posix_header { // byte offset\n char name[100]; // 0\n char mode[8]; // 100\n char uid[8]; // 108\n char gid[8]; // 116\n char size[12]; // 124\n char mtime[12]; // 136\n char chksum[8]; // 148\n char typeflag; // 156\n char linkname[100]; // 157\n char magic[6]; // 257\n char version[2]; // 263\n char uname[32]; // 265\n char gname[32]; // 297\n char devmajor[8]; // 329\n char devminor[8]; // 337\n char prefix[131]; // 345\n char atime[12]; // 476\n char ctime[12]; // 488\n };\n */\n\nvar posixHeader = [\n // <field name>, <size>, <offset>, <used>, <format>, <parse>, [ <check> ]\n ['name', 100, 0, function(file, field) {\n return formatTarString(file[field[0]], field[1]);\n }, function(buffer, offset, field) {\n return parseTarString(buffer.slice(offset, offset + field[1]));\n }],\n ['mode', 8, 100, function(file, field) {\n var mode = 
file[field[0]] || defaultFileMode;\n mode = mode & constants.TPERMMASK;\n return formatTarNumber(mode, field[1], defaultFileMode);\n }, function(buffer, offset, field) {\n var result = parseTarNumber(buffer.slice(offset, offset + field[1]));\n result &= constants.TPERMMASK;\n return result;\n }],\n ['uid', 8, 108, function(file, field) {\n return formatTarNumber(file[field[0]], field[1], defaultUid);\n }, function(buffer, offset, field) {\n return parseTarNumber(buffer.slice(offset, offset + field[1]));\n }],\n ['gid', 8, 116, function(file, field) {\n return formatTarNumber(file[field[0]], field[1], defaultGid);\n }, function(buffer, offset, field) {\n return parseTarNumber(buffer.slice(offset, offset + field[1]));\n }],\n ['size', 12, 124, function(file, field) {\n return formatTarNumber(file.data.length, field[1]);\n }, function(buffer, offset, field) {\n return parseTarNumber(buffer.slice(offset, offset + field[1]));\n }],\n ['modifyTime', 12, 136, function(file, field) {\n return formatTarDateTime(file[field[0]], field[1]);\n }, function(buffer, offset, field) {\n return parseTarDateTime(buffer.slice(offset, offset + field[1]));\n }],\n ['checksum', 8, 148, function(file, field) {\n return ' '; // placeholder\n }, function(buffer, offset, field) {\n return parseTarNumber(buffer.slice(offset, offset + field[1]));\n }],\n ['type', 1, 156, function(file, field) {\n // get last octal digit; 0 - regular file\n return '' + ((parseInt(file[field[0]], 10) || 0) % 8);\n }, function(buffer, offset, field) {\n return (parseInt(String.fromCharCode(buffer[offset]), 10) || 0) % 8;\n }],\n ['linkName', 100, 157, function(file, field) {\n return ''; // only regular files are supported\n }, function(buffer, offset, field) {\n return parseTarString(buffer.slice(offset, offset + field[1]));\n }],\n ['ustar', 8, 257, function(file, field) {\n return constants.TMAGIC; // magic + version\n }, function(buffer, offset, field) {\n return fixUstarMagic(\n parseTarString(buffer.slice(offset, offset + field[1]), true)\n );\n }, function(file, field) {\n return (file[field[0]] == constants.TMAGIC) ||\n (file[field[0]] == constants.OLDGNU_MAGIC);\n }],\n ['owner', 32, 265, function(file, field) {\n return formatTarString(file[field[0]], field[1]);\n }, function(buffer, offset, field) {\n return parseTarString(buffer.slice(offset, offset + field[1]));\n }],\n ['group', 32, 297, function(file, field) {\n return formatTarString(file[field[0]], field[1]);\n }, function(buffer, offset, field) {\n return parseTarString(buffer.slice(offset, offset + field[1]));\n }],\n ['majorNumber', 8, 329, function(file, field) {\n return ''; // only regular files are supported\n }, function(buffer, offset, field) {\n return parseTarNumber(buffer.slice(offset, offset + field[1]));\n }],\n ['minorNumber', 8, 337, function(file, field) {\n return ''; // only regular files are supported\n }, function(buffer, offset, field) {\n return parseTarNumber(buffer.slice(offset, offset + field[1]));\n }],\n ['prefix', 131, 345, function(file, field) {\n return formatTarString(file[field[0]], field[1]);\n }, function(buffer, offset, field) {\n return parseTarString(buffer.slice(offset, offset + field[1]));\n }],\n ['accessTime', 12, 476, function(file, field) {\n return formatTarDateTime(file[field[0]], field[1]);\n }, function(buffer, offset, field) {\n return parseTarDateTime(buffer.slice(offset, offset + field[1]));\n }],\n ['createTime', 12, 488, function(file, field) {\n return formatTarDateTime(file[field[0]], field[1]);\n }, function(buffer, 
offset, field) {\n return parseTarDateTime(buffer.slice(offset, offset + field[1]));\n }]\n];\n\nvar effectiveHeaderSize = (function(header) {\n var last = header[header.length - 1];\n return last[2] + last[1]; // offset + size\n})(posixHeader);\n\nfunction fixUstarMagic(value) {\n if (value.length == 8) {\n var chars = value.split('');\n\n if (chars[5] == constants.NULL_CHAR) {\n // TMAGIC ?\n if ((chars[6] == ' ') || (chars[6] == constants.NULL_CHAR)) {\n chars[6] = '0';\n }\n if ((chars[7] == ' ') || (chars[7] == constants.NULL_CHAR)) {\n chars[7] = '0';\n }\n chars = chars.join('');\n return chars == constants.TMAGIC ? chars : value;\n } else if (chars[7] == constants.NULL_CHAR) {\n // OLDGNU_MAGIC ?\n if (chars[5] == constants.NULL_CHAR) {\n chars[5] = ' ';\n }\n if (chars[6] == constants.NULL_CHAR) {\n chars[6] = ' ';\n }\n return chars == constants.OLDGNU_MAGIC ? chars : value;\n }\n }\n return value;\n}\n\nfunction formatTarString(value, length) {\n length -= 1; // preserve space for trailing null-char\n if (utils.isUndefined(value)) {\n value = '';\n }\n value = ('' + value).substr(0, length);\n return value + constants.NULL_CHAR;\n}\n\nfunction formatTarNumber(value, length, defaultValue) {\n defaultValue = parseInt(defaultValue) || 0;\n length -= 1; // preserve space for trailing null-char\n value = (parseInt(value) || defaultValue)\n .toString(8).substr(-length, length);\n while (value.length < length) {\n value = '0' + value;\n }\n return value + constants.NULL_CHAR;\n}\n\nfunction formatTarDateTime(value, length) {\n if (utils.isDateTime(value)) {\n value = Math.floor(1 * value / 1000);\n } else {\n value = parseInt(value, 10);\n if (isFinite(value)) {\n if (value <= 0) {\n return '';\n }\n } else {\n value = Math.floor(1 * new Date() / 1000);\n }\n }\n return formatTarNumber(value, length, 0);\n}\n\nfunction parseTarString(bytes, returnUnprocessed) {\n var result = String.fromCharCode.apply(null, bytes);\n if (returnUnprocessed) {\n return result;\n }\n var index = result.indexOf(constants.NULL_CHAR);\n return index >= 0 ? result.substr(0, index) : result;\n}\n\nfunction parseTarNumber(bytes) {\n var result = String.fromCharCode.apply(null, bytes);\n return parseInt(result.replace(/^0+$/g, ''), 8) || 0;\n}\n\nfunction parseTarDateTime(bytes) {\n if ((bytes.length == 0) || (bytes[0] == 0)) {\n return null;\n }\n return new Date(1000 * parseTarNumber(bytes));\n}\n\nfunction calculateChecksum(buffer, offset, skipChecksum) {\n var from = parseInt(offset, 10) || 0;\n var to = Math.min(from + effectiveHeaderSize, buffer.length);\n var result = 0;\n\n // When calculating checksum, `checksum` field should be\n // threat as filled with space char (byte 32)\n var skipFrom = 0;\n var skipTo = 0;\n if (skipChecksum) {\n posixHeader.every(function(field) {\n if (field[0] == 'checksum') {\n skipFrom = from + field[2];\n skipTo = skipFrom + field[1];\n return false;\n }\n return true;\n });\n }\n\n var whitespace = ' '.charCodeAt(0);\n for (var i = from; i < to; i++) {\n // 262144 = 8^6 - 6 octal digits - maximum possible value for checksum;\n // wrap to avoid numeric overflow\n var byte = (i >= skipFrom) && (i < skipTo) ? 
whitespace : buffer[i];\n result = (result + byte) % 262144;\n }\n return result;\n}\n\nmodule.exports.recordSize = recordSize;\nmodule.exports.defaultFileMode = defaultFileMode;\nmodule.exports.defaultUid = defaultUid;\nmodule.exports.defaultGid = defaultGid;\nmodule.exports.posixHeader = posixHeader;\nmodule.exports.effectiveHeaderSize = effectiveHeaderSize;\n\nmodule.exports.calculateChecksum = calculateChecksum;\nmodule.exports.formatTarString = formatTarString;\nmodule.exports.formatTarNumber = formatTarNumber;\nmodule.exports.formatTarDateTime = formatTarDateTime;\nmodule.exports.parseTarString = parseTarString;\nmodule.exports.parseTarNumber = parseTarNumber;\nmodule.exports.parseTarDateTime = parseTarDateTime;\n\n","'use strict';\n\nvar constants = require('./constants');\nvar utils = require('./utils');\nvar types = require('./types');\n\nfunction headerSize(file) {\n // header has fixed size\n return types.recordSize;\n}\n\nfunction dataSize(file) {\n // align to record boundary\n return Math.ceil(file.data.length / types.recordSize) * types.recordSize;\n}\n\nfunction allocateBuffer(files) {\n var totalSize = 0;\n\n // Calculate space that will be used by each file\n files.forEach(function(file) {\n totalSize += headerSize(file) + dataSize(file);\n });\n\n // TAR must end with two empty records\n totalSize += types.recordSize * 2;\n\n // Array SHOULD be initialized with zeros:\n // from TypedArray constructor docs:\n // > When creating a TypedArray instance (i.e. instance of Int8Array\n // > or similar), an array buffer is created internally\n // from ArrayBuffer constructor docs:\n // > A new ArrayBuffer object of the specified size.\n // > Its contents are initialized to 0.\n return new Uint8Array(totalSize);\n}\n\nfunction writeHeader(buffer, file, offset) {\n offset = parseInt(offset) || 0;\n\n var currentOffset = offset;\n types.posixHeader.forEach(function(field) {\n var value = field[3](file, field);\n var length = value.length;\n for (var i = 0; i < length; i += 1) {\n buffer[currentOffset + i] = value.charCodeAt(i) & 0xFF;\n }\n currentOffset += field[1]; // move to the next field\n });\n\n var field = utils.find(types.posixHeader, function(field) {\n return field[0] == 'checksum';\n });\n\n if (field) {\n // Patch checksum field\n var checksum = types.calculateChecksum(buffer, offset, true);\n var value = types.formatTarNumber(checksum, field[1] - 2) +\n constants.NULL_CHAR + ' ';\n currentOffset = offset + field[2];\n for (var i = 0; i < value.length; i += 1) {\n // put bytes\n buffer[currentOffset] = value.charCodeAt(i) & 0xFF;\n currentOffset++;\n }\n }\n\n return offset + headerSize(file);\n}\n\nfunction writeData(buffer, file, offset) {\n offset = parseInt(offset, 10) || 0;\n buffer.set(file.data, offset);\n return offset + dataSize(file);\n}\n\nfunction tar(files) {\n files = utils.map(files, function(file) {\n return utils.extend({}, file, {\n data: utils.toUint8Array(file.data)\n });\n });\n\n var buffer = allocateBuffer(files);\n\n var offset = 0;\n files.forEach(function(file) {\n offset = writeHeader(buffer, file, offset);\n offset = writeData(buffer, file, offset);\n });\n\n return buffer;\n}\n\nmodule.exports.tar = tar;\n","'use strict';\n\nvar constants = require('./constants');\nvar utils = require('./utils');\nvar types = require('./types');\n\nvar defaultOptions = {\n extractData: true,\n checkHeader: true,\n checkChecksum: true,\n checkFileSize: true\n};\n\nvar excludeFields = {\n size: true,\n checksum: true,\n ustar: true\n};\n\nvar messages = {\n 
unexpectedEndOfFile: 'Unexpected end of file.',\n fileCorrupted: 'File is corrupted.',\n checksumCheckFailed: 'Checksum check failed.'\n};\n\nfunction headerSize(header) {\n // header has fixed size\n return types.recordSize;\n}\n\nfunction dataSize(size) {\n // align to record boundary\n return Math.ceil(size / types.recordSize) * types.recordSize;\n}\n\nfunction isEndOfFile(buffer, offset) {\n var from = offset;\n var to = Math.min(buffer.length, offset + types.recordSize * 2);\n for (var i = from; i < to; i++) {\n if (buffer[i] != 0) {\n return false;\n }\n }\n return true;\n}\n\nfunction readHeader(buffer, offset, options) {\n if (buffer.length - offset < types.recordSize) {\n if (options.checkFileSize) {\n throw new Error(messages.unexpectedEndOfFile);\n }\n return null;\n }\n\n offset = parseInt(offset) || 0;\n\n var result = {};\n var currentOffset = offset;\n types.posixHeader.forEach(function(field) {\n result[field[0]] = field[4](buffer, currentOffset, field);\n currentOffset += field[1];\n });\n\n if (result.type != 0) { // only regular files can have data\n result.size = 0;\n }\n\n if (options.checkHeader) {\n types.posixHeader.forEach(function(field) {\n if (utils.isFunction(field[5]) && !field[5](result, field)) {\n var error = new Error(messages.fileCorrupted);\n error.data = {\n offset: offset + field[2],\n field: field[0]\n };\n throw error;\n }\n });\n }\n\n if (options.checkChecksum) {\n var checksum = types.calculateChecksum(buffer, offset, true);\n if (checksum != result.checksum) {\n var error = new Error(messages.checksumCheckFailed);\n error.data = {\n offset: offset,\n header: result,\n checksum: checksum\n };\n throw error;\n }\n }\n\n return result;\n}\n\nfunction readData(buffer, offset, header, options) {\n if (!options.extractData) {\n return null;\n }\n\n if (header.size <= 0) {\n return new Uint8Array();\n }\n return buffer.slice(offset, offset + header.size);\n}\n\nfunction createFile(header, data) {\n var result = {};\n types.posixHeader.forEach(function(field) {\n var name = field[0];\n if (!excludeFields[name]) {\n result[name] = header[name];\n }\n });\n\n result.isOldGNUFormat = header.ustar == constants.OLDGNU_MAGIC;\n\n if (data) {\n result.data = data;\n }\n\n return result;\n}\n\nfunction untar(buffer, options) {\n options = utils.extend({}, defaultOptions, options);\n\n var result = [];\n var offset = 0;\n var size = buffer.length;\n\n while (size - offset >= types.recordSize) {\n buffer = utils.toUint8Array(buffer);\n var header = readHeader(buffer, offset, options);\n if (!header) {\n break;\n }\n offset += headerSize(header);\n\n var data = readData(buffer, offset, header, options);\n result.push(createFile(header, data));\n offset += dataSize(header.size);\n\n if (isEndOfFile(buffer, offset)) {\n break;\n }\n }\n\n return result;\n}\n\nmodule.exports.untar = untar;\n","'use strict';\n\n// http://www.gnu.org/software/tar/manual/html_node/Standard.html\n\nvar utils = require('./lib/utils');\nvar constants = require('./lib/constants');\nvar tar = require('./lib/tar');\nvar untar = require('./lib/untar');\n\nutils.extend(module.exports, tar, untar, constants);\n","import { tar, untar, type TarFile, REGTYPE, DIRTYPE } from 'tinytar'\nimport type { FS } from '../postgresMod.js'\n\nexport type DumpTarCompressionOptions = 'none' | 'gzip' | 'auto'\n\nexport async function dumpTar(\n FS: FS,\n pgDataDir: string,\n dbname: string = 'pgdata',\n compression: DumpTarCompressionOptions = 'auto',\n): Promise<File | Blob> {\n const tarball = createTarball(FS, 
pgDataDir)\n const [compressed, zipped] = await maybeZip(tarball, compression)\n const filename = dbname + (zipped ? '.tar.gz' : '.tar')\n const type = zipped ? 'application/x-gzip' : 'application/x-tar'\n if (typeof File !== 'undefined') {\n return new File([compressed], filename, {\n type,\n })\n } else {\n return new Blob([compressed], {\n type,\n })\n }\n}\n\nconst compressedMimeTypes = [\n 'application/x-gtar',\n 'application/x-tar+gzip',\n 'application/x-gzip',\n 'application/gzip',\n]\n\nexport async function loadTar(\n FS: FS,\n file: File | Blob,\n pgDataDir: string,\n): Promise<void> {\n let tarball = new Uint8Array(await file.arrayBuffer())\n const filename =\n typeof File !== 'undefined' && file instanceof File ? file.name : undefined\n const compressed =\n compressedMimeTypes.includes(file.type) ||\n filename?.endsWith('.tgz') ||\n filename?.endsWith('.tar.gz')\n if (compressed) {\n tarball = await unzip(tarball)\n }\n\n let files\n try {\n files = untar(tarball)\n } catch (e) {\n if (e instanceof Error && e.message.includes('File is corrupted')) {\n // The file may be compressed, but had the wrong mime type, try unzipping it\n tarball = await unzip(tarball)\n files = untar(tarball)\n } else {\n throw e\n }\n }\n\n for (const file of files) {\n const filePath = pgDataDir + file.name\n\n // Ensure the directory structure exists\n const dirPath = filePath.split('/').slice(0, -1)\n for (let i = 1; i <= dirPath.length; i++) {\n const dir = dirPath.slice(0, i).join('/')\n if (!FS.analyzePath(dir).exists) {\n FS.mkdir(dir)\n }\n }\n\n // Write the file or directory\n if (file.type === REGTYPE) {\n FS.writeFile(filePath, file.data)\n FS.utime(\n filePath,\n dateToUnixTimestamp(file.modifyTime),\n dateToUnixTimestamp(file.modifyTime),\n )\n } else if (file.type === DIRTYPE) {\n FS.mkdir(filePath)\n }\n }\n}\n\nfunction readDirectory(FS: FS, path: string) {\n const files: TarFile[] = []\n\n const traverseDirectory = (currentPath: string) => {\n const entries = FS.readdir(currentPath)\n entries.forEach((entry) => {\n if (entry === '.' || entry === '..') {\n return\n }\n const fullPath = currentPath + '/' + entry\n const stats = FS.stat(fullPath)\n const data = FS.isFile(stats.mode)\n ? FS.readFile(fullPath, { encoding: 'binary' })\n : new Uint8Array(0)\n files.push({\n name: fullPath.substring(path.length), // remove the root path\n mode: stats.mode,\n size: stats.size,\n type: FS.isFile(stats.mode) ? 
REGTYPE : DIRTYPE,\n modifyTime: stats.mtime,\n data,\n })\n if (FS.isDir(stats.mode)) {\n traverseDirectory(fullPath)\n }\n })\n }\n\n traverseDirectory(path)\n return files\n}\n\nexport function createTarball(FS: FS, directoryPath: string) {\n const files = readDirectory(FS, directoryPath)\n const tarball = tar(files)\n return tarball\n}\n\nexport async function maybeZip(\n file: Uint8Array,\n compression: DumpTarCompressionOptions = 'auto',\n): Promise<[Uint8Array, boolean]> {\n if (compression === 'none') {\n return [file, false]\n } else if (typeof CompressionStream !== 'undefined') {\n return [await zipBrowser(file), true]\n } else if (\n typeof process !== 'undefined' &&\n process.versions &&\n process.versions.node\n ) {\n return [await zipNode(file), true]\n } else if (compression === 'auto') {\n return [file, false]\n } else {\n throw new Error('Compression not supported in this environment')\n }\n}\n\nexport async function zipBrowser(file: Uint8Array): Promise<Uint8Array> {\n const cs = new CompressionStream('gzip')\n const writer = cs.writable.getWriter()\n const reader = cs.readable.getReader()\n\n writer.write(file)\n writer.close()\n\n const chunks: Uint8Array[] = []\n\n while (true) {\n const { value, done } = await reader.read()\n if (done) break\n if (value) chunks.push(value)\n }\n\n const compressed = new Uint8Array(\n chunks.reduce((acc, chunk) => acc + chunk.length, 0),\n )\n let offset = 0\n chunks.forEach((chunk) => {\n compressed.set(chunk, offset)\n offset += chunk.length\n })\n\n return compressed\n}\n\nexport async function zipNode(file: Uint8Array): Promise<Uint8Array> {\n const { promisify } = await import('util')\n const { gzip } = await import('zlib')\n const gzipPromise = promisify(gzip)\n return await gzipPromise(file)\n}\n\nexport async function unzip(file: Uint8Array): Promise<Uint8Array> {\n if (typeof CompressionStream !== 'undefined') {\n return await unzipBrowser(file)\n } else if (\n typeof process !== 'undefined' &&\n process.versions &&\n process.versions.node\n ) {\n return await unzipNode(file)\n } else {\n throw new Error('Unsupported environment for decompression')\n }\n}\n\nexport async function unzipBrowser(file: Uint8Array): Promise<Uint8Array> {\n const ds = new DecompressionStream('gzip')\n const writer = ds.writable.getWriter()\n const reader = ds.readable.getReader()\n\n writer.write(file)\n writer.close()\n\n const chunks: Uint8Array[] = []\n\n while (true) {\n const { value, done } = await reader.read()\n if (done) break\n if (value) chunks.push(value)\n }\n\n const decompressed = new Uint8Array(\n chunks.reduce((acc, chunk) => acc + chunk.length, 0),\n )\n let offset = 0\n chunks.forEach((chunk) => {\n decompressed.set(chunk, offset)\n offset += chunk.length\n })\n\n return decompressed\n}\n\nexport async function unzipNode(file: Uint8Array): Promise<Uint8Array> {\n const { promisify } = await import('util')\n const { gunzip } = await import('zlib')\n const gunzipPromise = promisify(gunzip)\n return await gunzipPromise(file)\n}\n\nfunction dateToUnixTimestamp(date: Date | number | undefined): number {\n if (!date) {\n return Math.floor(Date.now() / 1000)\n } else {\n return typeof date === 'number' ? 
date : Math.floor(date.getTime() / 1000)\n }\n}\n","import type { PostgresMod } from '../postgresMod.js'\nimport type { PGlite } from '../pglite.js'\nimport { dumpTar, type DumpTarCompressionOptions } from './tarUtils.js'\n\nexport const WASM_PREFIX = '/tmp/pglite'\nexport const PGDATA = WASM_PREFIX + '/' + 'base'\n\nexport type FsType = 'nodefs' | 'idbfs' | 'memoryfs' | 'opfs-ahp'\n\n/**\n * Filesystem interface.\n * All virtual filesystems that are compatible with PGlite must implement\n * this interface.\n */\nexport interface Filesystem {\n /**\n * Initiate the filesystem and return the options to pass to the emscripten module.\n */\n init(\n pg: PGlite,\n emscriptenOptions: Partial<PostgresMod>,\n ): Promise<{ emscriptenOpts: Partial<PostgresMod> }>\n\n /**\n * Sync the filesystem to any underlying storage.\n */\n syncToFs(relaxedDurability?: boolean): Promise<void>\n\n /**\n * Sync the filesystem from any underlying storage.\n */\n initialSyncFs(): Promise<void>\n\n /**\n * Dump the PGDATA dir from the filesystem to a gziped tarball.\n */\n dumpTar(\n dbname: string,\n compression?: DumpTarCompressionOptions,\n ): Promise<File | Blob>\n\n /**\n * Close the filesystem.\n */\n closeFs(): Promise<void>\n}\n\n/**\n * Base class for all emscripten built-in filesystems.\n */\nexport class EmscriptenBuiltinFilesystem implements Filesystem {\n protected dataDir?: string\n protected pg?: PGlite\n\n constructor(dataDir?: string) {\n this.dataDir = dataDir\n }\n\n async init(pg: PGlite, emscriptenOptions: Partial<PostgresMod>) {\n this.pg = pg\n return { emscriptenOpts: emscriptenOptions }\n }\n\n async syncToFs(_relaxedDurability?: boolean) {}\n\n async initialSyncFs() {}\n\n async closeFs() {}\n\n async dumpTar(dbname: string, compression?: DumpTarCompressionOptions) {\n return dumpTar(this.pg!.Module.FS, PGDATA, dbname, compression)\n }\n}\n\n/**\n * Abstract base class for all custom virtual filesystems.\n * Each custom filesystem needs to implement an interface similar to the NodeJS FS API.\n */\nexport abstract class BaseFilesystem implements Filesystem {\n protected dataDir?: string\n protected pg?: PGlite\n readonly debug: boolean\n\n constructor(dataDir?: string, { debug = false }: { debug?: boolean } = {}) {\n this.dataDir = dataDir\n this.debug = debug\n }\n\n async syncToFs(_relaxedDurability?: boolean) {}\n\n async initialSyncFs() {}\n\n async closeFs() {}\n\n async dumpTar(dbname: string, compression?: DumpTarCompressionOptions) {\n return dumpTar(this.pg!.Module.FS, PGDATA, dbname, compression)\n }\n\n async init(pg: PGlite, emscriptenOptions: Partial<PostgresMod>) {\n this.pg = pg\n const options: Partial<PostgresMod> = {\n ...emscriptenOptions,\n preRun: [\n ...(emscriptenOptions.preRun || []),\n (mod: PostgresMod) => {\n const EMFS = createEmscriptenFS(mod, this)\n mod.FS.mkdir(PGDATA)\n mod.FS.mount(EMFS, {}, PGDATA)\n },\n ],\n }\n return { emscriptenOpts: options }\n }\n\n // Filesystem API\n\n abstract chmod(path: string, mode: number): void\n abstract close(fd: number): void\n abstract fstat(fd: number): FsStats\n abstract lstat(path: string): FsStats\n abstract mkdir(\n path: string,\n options?: { recursive?: boolean; mode?: number },\n ): void\n abstract open(path: string, flags?: string, mode?: number): number\n abstract readdir(path: string): string[]\n abstract read(\n fd: number,\n buffer: Uint8Array, // Buffer to read into\n offset: number, // Offset in buffer to start writing to\n length: number, // Number of bytes to read\n position: number, // Position in file 
to read from\n ): number\n abstract rename(oldPath: string, newPath: string): void\n abstract rmdir(path: string): void\n abstract truncate(\n path: string,\n len: number, // Length to truncate to - defaults to 0\n ): void\n abstract unlink(path: string): void\n abstract utimes(path: string, atime: number, mtime: number): void\n abstract writeFile(\n path: string,\n data: string | Uint8Array,\n options?: { encoding?: string; mode?: number; flag?: string },\n ): void\n abstract write(\n fd: number,\n buffer: Uint8Array, // Buffer to read from\n offset: number, // Offset in buffer to start reading from\n length: number, // Number of bytes to write\n position: number, // Position in file to write to\n ): number\n}\n\nexport type FsStats = {\n dev: number\n ino: number\n mode: number\n nlink: number\n uid: number\n gid: number\n rdev: number\n size: number\n blksize: number\n blocks: number\n atime: number\n mtime: number\n ctime: number\n}\n\ntype EmscriptenFileSystem = Emscripten.FileSystemType & {\n createNode: (\n parent: FSNode | null,\n name: string,\n mode: number,\n dev?: any,\n ) => FSNode\n node_ops: FS.NodeOps\n stream_ops: FS.StreamOps & {\n dup: (stream: FSStream) => void\n mmap: (\n stream: FSStream,\n length: number,\n position: number,\n prot: any,\n flags: any,\n ) => { ptr: number; allocated: boolean }\n msync: (\n stream: FSStream,\n buffer: Uint8Array,\n offset: number,\n length: number,\n mmapFlags: any,\n ) => number\n }\n} & { [key: string]: any }\n\ntype FSNode = FS.FSNode & {\n node_ops: FS.NodeOps\n stream_ops: FS.StreamOps\n}\n\ntype FSStream = FS.FSStream & {\n node: FSNode\n shared: {\n refcount: number\n }\n}\n\ntype FSMount = FS.Mount & {\n opts: {\n root: string\n }\n}\n\ntype EmscriptenFS = PostgresMod['FS'] & {\n createNode: (\n parent: FSNode | null,\n name: string,\n mode: number,\n dev?: any,\n ) => FSNode\n}\n\nexport const ERRNO_CODES = {\n EBADF: 8,\n EBADFD: 127,\n EEXIST: 20,\n EINVAL: 28,\n EISDIR: 31,\n ENODEV: 43,\n ENOENT: 44,\n ENOTDIR: 54,\n ENOTEMPTY: 55,\n} as const\n\n/**\n * Create an emscripten filesystem that uses the BaseFilesystem.\n * @param Module The emscripten module\n * @param baseFS The BaseFilesystem implementation\n * @returns The emscripten filesystem\n */\nconst createEmscriptenFS = (Module: PostgresMod, baseFS: BaseFilesystem) => {\n const FS = Module.FS as EmscriptenFS\n const log = baseFS.debug ? 
console.log : null\n const EMFS = {\n tryFSOperation<T>(f: () => T): T {\n try {\n return f()\n } catch (e: any) {\n if (!e.code) throw e\n if (e.code === 'UNKNOWN') throw new FS.ErrnoError(ERRNO_CODES.EINVAL)\n throw new FS.ErrnoError(e.code)\n }\n },\n mount(_mount: FSMount): FSNode {\n return EMFS.createNode(null, '/', 16384 | 511, 0)\n },\n syncfs(\n _mount: FS.Mount,\n _populate: any, // This has the wrong type in @types/emscripten\n _done: (err?: number | null) => unknown,\n ): void {\n // noop\n },\n createNode(\n parent: FSNode | null,\n name: string,\n mode: number,\n _dev?: any,\n ): FSNode {\n if (!FS.isDir(mode) && !FS.isFile(mode)) {\n throw new FS.ErrnoError(28)\n }\n const node = FS.createNode(parent, name, mode)\n node.node_ops = EMFS.node_ops\n node.stream_ops = EMFS.stream_ops\n return node\n },\n getMode: function (path: string): number {\n log?.('getMode', path)\n return EMFS.tryFSOperation(() => {\n const stats = baseFS.lstat(path)\n return stats.mode\n })\n },\n realPath: function (node: FSNode): string {\n const parts: string[] = []\n while (node.parent !== node) {\n parts.push(node.name)\n node = node.parent as FSNode\n }\n parts.push((node.mount as FSMount).opts.root)\n parts.reverse()\n return parts.join('/')\n },\n node_ops: {\n getattr(node: FSNode): FS.Stats {\n log?.('getattr', EMFS.realPath(node))\n const path = EMFS.realPath(node)\n return EMFS.tryFSOperation(() => {\n const stats = baseFS.lstat(path)\n return {\n ...stats,\n dev: 0,\n ino: node.id,\n nlink: 1,\n rdev: node.rdev,\n atime: new Date(stats.atime),\n mtime: new Date(stats.mtime),\n ctime: new Date(stats.ctime),\n }\n })\n },\n setattr(node: FSNode, attr: FS.Stats): void {\n log?.('setattr', EMFS.realPath(node), attr)\n const path = EMFS.realPath(node)\n EMFS.tryFSOperation(() => {\n if (attr.mode !== undefined) {\n baseFS.chmod(path, attr.mode)\n }\n if (attr.size !== undefined) {\n baseFS.truncate(path, attr.size)\n }\n if (attr.timestamp !== undefined) {\n baseFS.utimes(path, attr.timestamp, attr.timestamp)\n }\n if (attr.size !== undefined) {\n baseFS.truncate(path, attr.size)\n }\n })\n },\n lookup(parent: FSNode, name: string): FSNode {\n log?.('lookup', EMFS.realPath(parent), name)\n const path = [EMFS.realPath(parent), name].join('/')\n const mode = EMFS.getMode(path)\n return EMFS.createNode(parent, name, mode)\n },\n mknod(parent: FSNode, name: string, mode: number, dev: unknown): FSNode {\n log?.('mknod', EMFS.realPath(parent), name, mode, dev)\n const node = EMFS.createNode(parent, name, mode, dev)\n // create the backing node for this in the fs root as well\n const path = EMFS.realPath(node)\n return EMFS.tryFSOperation(() => {\n if (FS.isDir(node.mode)) {\n baseFS.mkdir(path, { mode })\n } else {\n baseFS.writeFile(path, '', { mode })\n }\n return node\n })\n },\n rename(oldNode: FSNode, newDir: FSNode, newName: string): void {\n log?.('rename', EMFS.realPath(oldNode), EMFS.realPath(newDir), newName)\n const oldPath = EMFS.realPath(oldNode)\n const newPath = [EMFS.realPath(newDir), newName].join('/')\n EMFS.tryFSOperation(() => {\n baseFS.rename(oldPath, newPath)\n })\n oldNode.name = newName\n },\n unlink(parent: FSNode, name: string): void {\n log?.('unlink', EMFS.realPath(parent), name)\n const path = [EMFS.realPath(parent), name].join('/')\n try {\n baseFS.unlink(path)\n } catch (e: any) {\n // no-op\n }\n },\n rmdir(parent: FSNode, name: string): void {\n log?.('rmdir', EMFS.realPath(parent), name)\n const path = [EMFS.realPath(parent), name].join('/')\n return 
EMFS.tryFSOperation(() => {\n baseFS.rmdir(path)\n })\n },\n readdir(node: FSNode): string[] {\n log?.('readdir', EMFS.realPath(node))\n const path = EMFS.realPath(node)\n return EMFS.tryFSOperation(() => {\n return baseFS.readdir(path)\n })\n },\n symlink(parent: FSNode, newName: string, oldPath: string): void {\n log?.('symlink', EMFS.realPath(parent), newName, oldPath)\n // This is not supported by EMFS\n throw new FS.ErrnoError(63)\n },\n readlink(node: FSNode): string {\n log?.('readlink', EMFS.realPath(node))\n // This is not supported by EMFS\n throw new FS.ErrnoError(63)\n },\n },\n stream_ops: {\n open(stream: FSStream): void {\n log?.('open stream', EMFS.realPath(stream.node))\n const path = EMFS.realPath(stream.node)\n return EMFS.tryFSOperation(() => {\n if (FS.isFile(stream.node.mode)) {\n stream.shared.refcount = 1\n stream.nfd = baseFS.open(path)\n }\n })\n },\n close(stream: FSStream): void {\n log?.('close stream', EMFS.realPath(stream.node))\n return EMFS.tryFSOperation(() => {\n if (\n FS.isFile(stream.node.mode) &&\n stream.nfd &&\n --stream.shared.refcount === 0\n ) {\n baseFS.close(stream.nfd)\n }\n })\n },\n dup(stream: FSStream) {\n log?.('dup stream', EMFS.realPath(stream.node))\n stream.shared.refcount++\n },\n read(\n stream: FSStream, // Stream to read from\n buffer: Uint8Array, // Buffer to read into - Wrong type in @types/emscripten\n offset: number, // Offset in buffer to start writing to\n length: number, // Number of bytes to read\n position: number, // Position in file to read from\n ): number {\n log?.(\n 'read stream',\n EMFS.realPath(stream.node),\n offset,\n length,\n position,\n )\n if (length === 0) return 0\n const ret = EMFS.tryFSOperation(() =>\n baseFS.read(\n stream.nfd!,\n buffer as unknown as Uint8Array,\n offset,\n length,\n position,\n ),\n )\n return ret\n },\n write(\n stream: FSStream, // Stream to write to\n buffer: Uint8Array, // Buffer to read from - Wrong type in @types/emscripten\n offset: number, // Offset in buffer to start writing from\n length: number, // Number of bytes to write\n position: number, // Position in file to write to\n ): number {\n log?.(\n 'write stream',\n EMFS.realPath(stream.node),\n offset,\n length,\n position,\n )\n return EMFS.tryFSOperation(() =>\n baseFS.write(\n stream.nfd!,\n buffer.buffer as unknown as Uint8Array,\n offset,\n length,\n position,\n ),\n )\n },\n llseek(stream: FSStream, offset: number, whence: number): number {\n log?.('llseek stream', EMFS.realPath(stream.node), offset, whence)\n let position = offset\n if (whence === 1) {\n position += stream.position\n } else if (whence === 2) {\n if (FS.isFile(stream.node.mode)) {\n EMFS.tryFSOperation(() => {\n const stat = baseFS.fstat(stream.nfd!)\n position += stat.size\n })\n }\n }\n if (position < 0) {\n throw new FS.ErrnoError(28)\n }\n return position\n },\n mmap(\n stream: FSStream,\n length: number,\n position: number,\n prot: any,\n flags: any,\n ) {\n log?.(\n 'mmap stream',\n EMFS.realPath(stream.node),\n length,\n position,\n prot,\n flags,\n )\n if (!FS.isFile(stream.node.mode)) {\n throw new FS.ErrnoError(ERRNO_CODES.ENODEV)\n }\n\n const ptr = (Module as any).mmapAlloc(length) // TODO: Fix type and check this is exported\n\n EMFS.stream_ops.read(\n stream,\n Module.HEAP8 as unknown as Uint8Array,\n ptr,\n length,\n position,\n )\n return { ptr, allocated: true }\n },\n msync(\n stream: FSStream,\n buffer: Uint8Array,\n offset: number,\n length: number,\n mmapFlags: any,\n ) {\n log?.(\n 'msync stream',\n 
EMFS.realPath(stream.node),\n offset,\n length,\n mmapFlags,\n )\n EMFS.stream_ops.write(stream, buffer, 0, length, offset)\n return 0\n },\n },\n } satisfies EmscriptenFileSystem\n return EMFS\n}\n","import * as fs from 'fs'\nimport * as path from 'path'\nimport { EmscriptenBuiltinFilesystem, PGDATA } from './base.js'\nimport type { PostgresMod } from '../postgresMod.js'\nimport { PGlite } from '../pglite.js'\n\nexport class NodeFS extends EmscriptenBuiltinFilesystem {\n protected rootDir: string\n\n constructor(dataDir: string) {\n super(dataDir)\n this.rootDir = path.resolve(dataDir)\n if (!fs.existsSync(path.join(this.rootDir))) {\n fs.mkdirSync(this.rootDir)\n }\n }\n\n async init(pg: PGlite, opts: Partial<PostgresMod>) {\n this.pg = pg\n const options: Partial<PostgresMod> = {\n ...opts,\n preRun: [\n ...(opts.preRun || []),\n (mod: any) => {\n const nodefs = mod.FS.filesystems.NODEFS\n mod.FS.mkdir(PGDATA)\n mod.FS.mount(nodefs, { root: this.rootDir }, PGDATA)\n },\n ],\n }\n return { emscriptenOpts: options }\n }\n\n async closeFs(): Promise<void> {\n this.pg!.Module.FS.quit()\n }\n}\n","import { BaseFilesystem, ERRNO_CODES, type FsStats } from './base.js'\nimport type { PostgresMod } from '../postgresMod.js'\nimport { PGlite } from '../pglite.js'\n\nexport interface OpfsAhpOptions {\n initialPoolSize?: number\n maintainedPoolSize?: number\n debug?: boolean\n}\n\n// TypeScript doesn't have a built-in type for FileSystemSyncAccessHandle\nexport interface FileSystemSyncAccessHandle {\n close(): void\n flush(): void\n getSize(): number\n read(buffer: ArrayBuffer, options: { at: number }): number\n truncate(newSize: number): void\n write(buffer: ArrayBuffer, options: { at: number }): number\n}\n\n// State\n\nconst STATE_FILE = 'state.txt'\nconst DATA_DIR = 'data'\nconst INITIAL_MODE = {\n DIR: 16384,\n FILE: 32768,\n}\n\nexport interface State {\n root: DirectoryNode\n pool: PoolFilenames\n}\n\nexport type PoolFilenames = Array<string>\n\n// WAL\n\nexport interface WALEntry {\n opp: string\n args: any[]\n}\n\n// Node tree\n\nexport type NodeType = 'file' | 'directory'\n\ninterface BaseNode {\n type: NodeType\n lastModified: number\n mode: number\n}\n\nexport interface FileNode extends BaseNode {\n type: 'file'\n backingFilename: string\n}\n\nexport interface DirectoryNode extends BaseNode {\n type: 'directory'\n children: { [filename: string]: Node }\n}\n\nexport type Node = FileNode | DirectoryNode\n\n/**\n * PGlite OPFS access handle pool filesystem.\n * Opens a pool of sync access handles and then allocates them as needed.\n */\nexport class OpfsAhpFS extends BaseFilesystem {\n declare readonly dataDir: string\n readonly initialPoolSize: number\n readonly maintainedPoolSize: number\n\n #opfsRootAh!: FileSystemDirectoryHandle\n #rootAh!: FileSystemDirectoryHandle\n #dataDirAh!: FileSystemDirectoryHandle\n\n #stateFH!: FileSystemFileHandle\n #stateSH!: FileSystemSyncAccessHandle\n\n #fh: Map<string, FileSystemFileHandle> = new Map()\n #sh: Map<string, FileSystemSyncAccessHandle> = new Map()\n\n #handleIdCounter = 0\n #openHandlePaths: Map<number, string> = new Map()\n #openHandleIds: Map<string, number> = new Map()\n\n state!: State\n lastCheckpoint = 0\n checkpointInterval = 1000 * 60 // 1 minute\n poolCounter = 0\n\n #unsyncedSH = new Set<FileSystemSyncAccessHandle>()\n\n constructor(\n dataDir: string,\n {\n initialPoolSize = 1000,\n maintainedPoolSize = 100,\n debug = false,\n }: OpfsAhpOptions = {},\n ) {\n super(dataDir, { debug })\n this.initialPoolSize = initialPoolSize\n 
this.maintainedPoolSize = maintainedPoolSize\n }\n\n async init(pg: PGlite, opts: Partial<PostgresMod>) {\n await this.#init()\n return super.init(pg, opts)\n }\n\n async syncToFs(relaxedDurability = false) {\n await this.maybeCheckpointState()\n await this.maintainPool()\n if (!relaxedDurability) {\n this.flush()\n }\n }\n\n async closeFs(): Promise<void> {\n for (const sh of this.#sh.values()) {\n sh.close()\n }\n this.#stateSH.flush()\n this.#stateSH.close()\n this.pg!.Module.FS.quit()\n }\n\n async #init() {\n this.#opfsRootAh = await navigator.storage.getDirectory()\n this.#rootAh = await this.#resolveOpfsDirectory(this.dataDir!, {\n create: true,\n })\n this.#dataDirAh = await this.#resolveOpfsDirectory(DATA_DIR, {\n from: this.#rootAh,\n create: true,\n })\n\n this.#stateFH = await this.#rootAh.getFileHandle(STATE_FILE, {\n create: true,\n })\n this.#stateSH = await (this.#stateFH as any).createSyncAccessHandle()\n\n const stateAB = new ArrayBuffer(this.#stateSH.getSize())\n this.#stateSH.read(stateAB, { at: 0 })\n let state: State\n const stateLines = new TextDecoder().decode(stateAB).split('\\n')\n // Line 1 is a base state object.\n // Lines 1+n are WAL entries.\n\n let isNewState = false\n try {\n state = JSON.parse(stateLines[0])\n } catch (e) {\n state = {\n root: {\n type: 'directory',\n lastModified: Date.now(),\n mode: INITIAL_MODE.DIR,\n children: {},\n },\n pool: [],\n }\n // write new state to file\n this.#stateSH.truncate(0)\n this.#stateSH.write(new TextEncoder().encode(JSON.stringify(state)), {\n at: 0,\n })\n isNewState = true\n }\n this.state = state\n\n // Apply WAL entries\n const wal = stateLines\n .slice(1)\n .filter(Boolean)\n .map((line) => JSON.parse(line))\n for (const entry of wal) {\n const methodName = `_${entry.opp}State`\n if (typeof this[methodName as keyof this] === 'function') {\n try {\n const method = this[methodName as keyof this] as any\n method.bind(this)(...entry.args)\n } catch (e) {\n console.warn('Error applying OPFS AHP WAL entry', entry, e)\n }\n }\n }\n\n // Open all file handles for dir tree\n const walkPromises: Promise
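The filesystem backends named in the bundle (nodefs, idbfs, memoryfs, opfs-ahp) are selected through the dataDir string passed to the constructor, and the dumpTar helper above backs the client's data-directory export. A sketch of persistent usage, assuming the documented idb:// prefix and dumpDataDir() method:

```typescript
import { PGlite } from '@electric-sql/pglite'

// 'idb://...' selects the IndexedDB-backed filesystem in the browser;
// a plain path selects nodefs under Node.js, and 'memory://' (or no
// argument) keeps everything in memory.
const db = new PGlite('idb://my-pgdata')

await db.exec(`
  CREATE TABLE IF NOT EXISTS todo (id SERIAL PRIMARY KEY, task TEXT);
  INSERT INTO todo (task) VALUES ('persist me');
`)

// dumpDataDir() serializes PGDATA to a tarball via the dumpTar/
// createTarball helpers in src/fs/tarUtils.ts; 'gzip' matches the
// DumpTarCompressionOptions type ('none' | 'gzip' | 'auto').
const dump: File | Blob = await db.dumpDataDir('gzip')
console.log('dump size:', dump.size)
```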