// @netlify/content-engine
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.defaultLedgersRoot = void 0;
exports.makeFSLedgerWriteHandler = makeFSLedgerWriteHandler;
exports.makeFSLedgerReadHandler = makeFSLedgerReadHandler;
exports.getLedgerBlocks = getLedgerBlocks;
const path_1 = __importDefault(require("path"));
const fs_1 = __importDefault(require("fs"));
const zlib_1 = require("zlib");
const base_handler_1 = require("../base-handler");
exports.defaultLedgersRoot = path_1.default.join(process.cwd(), `.engine-ledgers`);
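// On disk, each ledger lives under rootDir at a path derived by
// makeLedgerFilePath in ../base-handler, with one gzipped JSONL file per
// block. A sketch of the layout (the directory shape is an assumption
// inferred from the block-id filters in getLedgerBlocks below):
//
//   .engine-ledgers/
//     <path from makeLedgerFilePath(ledgerDetails)>/
//       block-id-<blockId>.jsonl.gzip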
// Takes a root dir and returns a ledger write handler. Incoming ledger
// writes are stored on disk at a path inside rootDir derived from the
// ledger details.
function makeFSLedgerWriteHandler({ rootDir = exports.defaultLedgersRoot } = {}) {
return (0, base_handler_1.makeLedgerWriteHandler)({
getWritableStream: (ledgerDetails) => {
const filePath = (0, base_handler_1.makeLedgerFilePath)(ledgerDetails);
const localPath = path_1.default.join(rootDir, filePath);
const parentDir = path_1.default.dirname(localPath);
// mkdirSync with recursive: true is a no-op when the directory already exists.
fs_1.default.mkdirSync(parentDir, { recursive: true });
return fs_1.default.createWriteStream(localPath);
},
});
}
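// Usage sketch for the write handler. Assumptions: the value returned by
// makeLedgerWriteHandler is Express-compatible middleware (the read handler's
// (req, res, next) signature suggests this), and the route shape below is
// purely illustrative:
//
//   const express = require("express");
//   const { makeFSLedgerWriteHandler } = require("./fs");
//
//   const app = express();
//   app.post("/ledger/write", makeFSLedgerWriteHandler({ rootDir: "/tmp/ledgers" }));
//   app.listen(3000);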
// TODO: refactor this to be re-usable by different "adapters" similar to makeLedgerWriteHandler above with ledgerDetails and a returned read stream
function makeFSLedgerReadHandler({ rootDir = exports.defaultLedgersRoot } = {}) {
return async (req, res, next) => {
const ledgerId =
// TODO: update gql pods to use headers, not url paths (for consistency)
req.params.dataLayerId || (0, base_handler_1.parseStringHeader)(req.headers, `x-ledger-id`);
const configurationId = req.params.configurationId ||
// TODO: update gql pods to use headers, not url paths (for consistency)
(0, base_handler_1.parseStringHeader)(req.headers, `x-configuration-id`);
const cacheId = req.params.cacheId ||
// TODO: update gql pods to use headers, not url paths (for consistency)
(0, base_handler_1.parseStringHeader)(req.headers, `x-cache-id`);
const startBlockId = (0, base_handler_1.parseStringHeader)(req.headers, [
`x-start-block`,
// TODO: update gql pods to use x-start-block
`x-consumer-offset`,
]);
const endBlockId = (0, base_handler_1.parseStringHeader)(req.headers, [
`x-end-block`,
`x-end-offset`,
]);
const { files, allFileNames } = getLedgerBlocks({
configurationId,
cacheId,
ledgerId,
rootDir,
endBlockId,
startBlockId,
});
// The returned consumer offset is the block *after* endBlockId, if a newer
// block exists on disk; otherwise fall back to endBlockId itself.
const endBlockIndex = allFileNames.findIndex((file) => file.startsWith(`block-id-${endBlockId}`));
const nextFileName = endBlockIndex !== -1 ? allFileNames[endBlockIndex + 1] : undefined;
const lastBlockVersion = nextFileName
? (0, base_handler_1.getIdFromLedgerFilePath)(nextFileName)
: endBlockId || "";
res.setHeader(`x-consumer-offset`, lastBlockVersion);
// TODO: have gql api return this header instead (for consistency)
res.setHeader(`x-block-id`, lastBlockVersion);
for (const file of files) {
// We can't use pipeline as there's no way to tell it to not end
// the write stream when the read stream finishes.
await new Promise((resolve, reject) => {
let isEnded = false;
const cleanup = () => {
if (!isEnded) {
isEnded = true;
// Separate blocks with a newline so the concatenated response stays line-delimited.
res.write(`\n`);
resolve(null);
}
};
const unzippedStream = file.readStream.pipe((0, zlib_1.createGunzip)());
unzippedStream
.on(`data`, (buf) => {
res.write(buf);
})
.on(`finish`, cleanup)
.on(`close`, cleanup)
.on(`end`, cleanup)
.on(`error`, (error) => {
reject(error);
});
});
}
res.status(200).end();
return next();
};
}
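// Client-side sketch for the read handler. Assumptions: it is mounted as
// Express middleware and consumers are plain HTTP clients; the URL and ids
// below are illustrative. Header names match the parsing above:
//
//   const res = await fetch("http://localhost:3000/ledger/read", {
//     headers: {
//       "x-ledger-id": "my-ledger",
//       "x-configuration-id": "my-config",
//       "x-cache-id": "my-cache",
//       "x-start-block": "0000000001", // omit to read from the first block
//     },
//   });
//   const nextOffset = res.headers.get("x-consumer-offset");
//   const body = await res.text(); // gunzipped blocks, newline-separated JSONL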
function getLedgerBlocks({ ledgerId, configurationId, cacheId, startBlockId, endBlockId, rootDir, }) {
const filePath = (0, base_handler_1.makeLedgerFilePath)({
ledgerId,
cacheId,
configurationId,
});
const localPath = path_1.default.join(rootDir, filePath);
const allFileNames = fs_1.default.readdirSync(localPath).sort();
const files = allFileNames
.filter((file) => {
// Note: these string comparisons assume block ids sort lexicographically
// in the same order they were written (e.g. zero-padded ids).
if (endBlockId) {
return (/gzip/.test(file) &&
file < `block-id-${endBlockId}.jsonl.gzip` &&
file >= `block-id-${startBlockId}.jsonl.gzip`);
}
else {
return (/gzip/.test(file) &&
(!startBlockId || file >= `block-id-${startBlockId}.jsonl.gzip`));
}
})
.map((file) => {
return {
name: file,
readStream: fs_1.default.createReadStream(path_1.default.join(localPath, file)),
};
});
return { allFileNames, files };
}
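// Direct usage sketch for getLedgerBlocks. Assumption: block ids are
// zero-padded so the lexicographic comparisons above match write order;
// all ids below are illustrative:
//
//   const { createGunzip } = require("zlib");
//   const { files } = getLedgerBlocks({
//     ledgerId: "my-ledger",
//     configurationId: "my-config",
//     cacheId: "my-cache",
//     startBlockId: "0000000001",
//     endBlockId: "0000000005",
//     rootDir: exports.defaultLedgersRoot,
//   });
//   for (const { readStream } of files) {
//     readStream.pipe(createGunzip()).pipe(process.stdout, { end: false });
//   }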
//# sourceMappingURL=fs.js.map