envio
A latency and sync speed optimized, developer friendly blockchain data indexer.
// Generated by ReScript, PLEASE EDIT WITH CARE
'use strict';
var Fs = require("fs");
var Path = require("path");
var $$Array = require("rescript/lib/js/array.js");
var Table = require("./db/Table.res.js");
var Utils = require("./Utils.res.js");
var Js_exn = require("rescript/lib/js/js_exn.js");
var Schema = require("./db/Schema.res.js");
var Js_dict = require("rescript/lib/js/js_dict.js");
var Logging = require("./Logging.res.js");
var $$Promise = require("./bindings/Promise.res.js");
var Internal = require("./Internal.res.js");
var Belt_Array = require("rescript/lib/js/belt_Array.js");
var Caml_option = require("rescript/lib/js/caml_option.js");
var Persistence = require("./Persistence.res.js");
var EntityHistory = require("./db/EntityHistory.res.js");
var InternalTable = require("./db/InternalTable.res.js");
var Child_process = require("child_process");
var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
var S$RescriptSchema = require("rescript-schema/src/S.res.js");
var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
var getCacheRowCountFnName = "get_cache_row_count";
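// Builds a CREATE INDEX IF NOT EXISTS statement for the given table and fields;
// the index name is derived from the table name and the joined field names.
// Illustrative example: makeCreateIndexQuery("user", ["owner_id"], "public")
// => CREATE INDEX IF NOT EXISTS "user_owner_id" ON "public"."user"("owner_id");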
function makeCreateIndexQuery(tableName, indexFields, pgSchema) {
var indexName = tableName + "_" + indexFields.join("_");
var index = Belt_Array.map(indexFields, (function (idx) {
return "\"" + idx + "\"";
})).join(", ");
return "CREATE INDEX IF NOT EXISTS \"" + indexName + "\" ON \"" + pgSchema + "\".\"" + tableName + "\"(" + index + ");";
}
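// Emits the index DDL for a table: one CREATE INDEX per single-field index and
// one per composite index, concatenated into a single string.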
function makeCreateTableIndicesQuery(table, pgSchema) {
var tableName = table.tableName;
var createIndex = function (indexField) {
return makeCreateIndexQuery(tableName, [indexField], pgSchema);
};
var createCompositeIndex = function (indexFields) {
return makeCreateIndexQuery(tableName, indexFields, pgSchema);
};
var singleIndices = Table.getSingleIndices(table);
var compositeIndices = Table.getCompositeIndices(table);
return Belt_Array.map(singleIndices, createIndex).join("\n") + Belt_Array.map(compositeIndices, createCompositeIndex).join("\n");
}
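// Builds the CREATE TABLE IF NOT EXISTS statement for a table definition.
// Built-in SQL types are used verbatim, other types (e.g. enums) are
// schema-qualified, and NUMERIC arrays are stored as TEXT when
// isNumericArrayAsText is set (the Hasura flag, judging by the call site below).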
function makeCreateTableQuery(table, pgSchema, isNumericArrayAsText) {
var fieldsMapped = Belt_Array.map(Table.getFields(table), (function (field) {
var defaultValue = field.defaultValue;
var isArray = field.isArray;
var fieldType = field.fieldType;
var fieldName = Table.getDbFieldName(field);
var tmp;
tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "BIGINT" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" ? (
fieldType === "NUMERIC" && isArray && isNumericArrayAsText ? "TEXT" : fieldType
) : (
fieldType.startsWith("NUMERIC(") ? fieldType : "\"" + pgSchema + "\"." + fieldType
);
return "\"" + fieldName + "\" " + tmp + (
isArray ? "[]" : ""
) + (
defaultValue !== undefined ? " DEFAULT " + defaultValue : (
field.isNullable ? "" : " NOT NULL"
)
);
})).join(", ");
var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
var primaryKey = Belt_Array.map(primaryKeyFieldNames, (function (field) {
return "\"" + field + "\"";
})).join(", ");
return "CREATE TABLE IF NOT EXISTS \"" + pgSchema + "\".\"" + table.tableName + "\"(" + fieldsMapped + (
primaryKeyFieldNames.length !== 0 ? ", PRIMARY KEY(" + primaryKey + ")" : ""
) + ");";
}
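// Assembles the initialization SQL as two statements to run inside one
// transaction: (1) optionally drop and recreate the schema (skipped for an
// already-empty "public" schema), grants, enum types, all internal and entity
// tables with their history tables, their indexes and derived-from indexes, the
// meta/chain-metadata views and the initial chain rows; (2) a plpgsql helper
// used later to count rows in effect-cache tables.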
function makeInitializeTransaction(pgSchema, pgUser, isHasuraEnabled, chainConfigsOpt, entitiesOpt, enumsOpt, isEmptyPgSchemaOpt) {
var chainConfigs = chainConfigsOpt !== undefined ? chainConfigsOpt : [];
var entities = entitiesOpt !== undefined ? entitiesOpt : [];
var enums = enumsOpt !== undefined ? enumsOpt : [];
var isEmptyPgSchema = isEmptyPgSchemaOpt !== undefined ? isEmptyPgSchemaOpt : false;
var generalTables = [
InternalTable.Chains.table,
InternalTable.PersistedState.table,
InternalTable.Checkpoints.table,
InternalTable.RawEvents.table
];
var allTables = $$Array.copy(generalTables);
var allEntityTables = [];
entities.forEach(function (entity) {
allEntityTables.push(entity.table);
allTables.push(entity.table);
allTables.push(entity.entityHistory.table);
});
var derivedSchema = Schema.make(allEntityTables);
var query = {
contents: (
isEmptyPgSchema && pgSchema === "public" ? "" : "DROP SCHEMA IF EXISTS \"" + pgSchema + "\" CASCADE;\nCREATE SCHEMA \"" + pgSchema + "\";\n"
) + ("GRANT ALL ON SCHEMA \"" + pgSchema + "\" TO \"" + pgUser + "\";\nGRANT ALL ON SCHEMA \"" + pgSchema + "\" TO public;")
};
enums.forEach(function (enumConfig) {
var enumCreateQuery = "CREATE TYPE \"" + pgSchema + "\"." + enumConfig.name + " AS ENUM(" + enumConfig.variants.map(function (v) {
return "'" + v + "'";
}).join(", ") + ");";
query.contents = query.contents + "\n" + enumCreateQuery;
});
allTables.forEach(function (table) {
query.contents = query.contents + "\n" + makeCreateTableQuery(table, pgSchema, isHasuraEnabled);
});
allTables.forEach(function (table) {
var indices = makeCreateTableIndicesQuery(table, pgSchema);
if (indices !== "") {
query.contents = query.contents + "\n" + indices;
return ;
}
});
entities.forEach(function (entity) {
Table.getDerivedFromFields(entity.table).forEach(function (derivedFromField) {
var indexField = Utils.unwrapResultExn(Schema.getDerivedFromFieldName(derivedSchema, derivedFromField));
query.contents = query.contents + "\n" + makeCreateIndexQuery(derivedFromField.derivedFromEntity, [indexField], pgSchema);
});
});
query.contents = query.contents + "\n" + InternalTable.Views.makeMetaViewQuery(pgSchema);
query.contents = query.contents + "\n" + InternalTable.Views.makeChainMetadataViewQuery(pgSchema);
var initialChainsValuesQuery = InternalTable.Chains.makeInitialValuesQuery(pgSchema, chainConfigs);
if (initialChainsValuesQuery !== undefined) {
query.contents = query.contents + "\n" + initialChainsValuesQuery;
}
return [
query.contents,
"CREATE OR REPLACE FUNCTION " + getCacheRowCountFnName + "(table_name text) \nRETURNS integer AS $$\nDECLARE\n result integer;\nBEGIN\n EXECUTE format('SELECT COUNT(*) FROM \"" + pgSchema + "\".%I', table_name) INTO result;\n RETURN result;\nEND;\n$$ LANGUAGE plpgsql;"
];
}
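// Plain SELECT builders used by the loaders below; values are passed as query
// parameters while the schema, table and field names are interpolated.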
function makeLoadByIdQuery(pgSchema, tableName) {
return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = $1 LIMIT 1;";
}
function makeLoadByFieldQuery(pgSchema, tableName, fieldName, operator) {
return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE \"" + fieldName + "\" " + operator + " $1;";
}
function makeLoadByIdsQuery(pgSchema, tableName) {
return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = ANY($1::text[]);";
}
function makeLoadAllQuery(pgSchema, tableName) {
return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\";";
}
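// Batch upsert in a single prepared statement: each column is passed as one
// typed array parameter and expanded with unnest(). Raw-events inserts and
// tables without a primary key skip ON CONFLICT; otherwise conflicts either do
// nothing (key-only tables) or update all non-primary columns.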
function makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents) {
var match = Table.toSqlParams(table, itemSchema, pgSchema);
var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + match.quotedFieldNames.join(", ") + ")\nSELECT * FROM unnest(" + match.arrayFieldTypes.map(function (arrayFieldType, idx) {
return "$" + (idx + 1 | 0).toString() + "::" + arrayFieldType;
}).join(",") + ")" + (
isRawEvents || primaryKeyFieldNames.length === 0 ? "" : "ON CONFLICT(" + primaryKeyFieldNames.map(function (fieldName) {
return "\"" + fieldName + "\"";
}).join(",") + ") DO " + (
Utils.$$Array.isEmpty(quotedNonPrimaryFieldNames) ? "NOTHING" : "UPDATE SET " + quotedNonPrimaryFieldNames.map(function (fieldName) {
return fieldName + " = EXCLUDED." + fieldName;
}).join(",")
)
) + ";";
}
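// Fallback batch upsert using an explicit VALUES list for a fixed itemsCount.
// Placeholders are numbered column-major ($ = fieldIdx * itemsCount + idx) to
// match the flattened per-column parameter layout produced in
// makeTableBatchSetQuery.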
function makeInsertValuesSetQuery(pgSchema, table, itemSchema, itemsCount) {
var match = Table.toSqlParams(table, itemSchema, pgSchema);
var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
var quotedFieldNames = match.quotedFieldNames;
var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
var fieldsCount = quotedFieldNames.length;
var placeholders = "";
for(var idx = 1; idx <= itemsCount; ++idx){
if (idx > 1) {
placeholders = placeholders + ",";
}
placeholders = placeholders + "(";
for(var fieldIdx = 0; fieldIdx < fieldsCount; ++fieldIdx){
if (fieldIdx > 0) {
placeholders = placeholders + ",";
}
placeholders = placeholders + ("$" + (Math.imul(fieldIdx, itemsCount) + idx | 0).toString());
}
placeholders = placeholders + ")";
}
return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + quotedFieldNames.join(", ") + ")\nVALUES" + placeholders + (
primaryKeyFieldNames.length !== 0 ? "ON CONFLICT(" + primaryKeyFieldNames.map(function (fieldName) {
return "\"" + fieldName + "\"";
}).join(",") + ") DO " + (
Utils.$$Array.isEmpty(quotedNonPrimaryFieldNames) ? "NOTHING" : "UPDATE SET " + quotedNonPrimaryFieldNames.map(function (fieldName) {
return fieldName + " = EXCLUDED." + fieldName;
}).join(",")
) : ""
) + ";";
}
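// Picks the batch strategy for a table and compiles the matching converter:
// unnest for raw events and for tables without array fields, otherwise VALUES
// chunks of 500 rows (entity history and tables with array columns), with the
// per-column arrays flattened into a single parameter list.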
function makeTableBatchSetQuery(pgSchema, table, itemSchema) {
var match = Table.toSqlParams(table, itemSchema, pgSchema);
var isRawEvents = table.tableName === InternalTable.RawEvents.table.tableName;
var isHistoryUpdate = table.tableName.startsWith(EntityHistory.historyTablePrefix);
if ((isRawEvents || !match.hasArrayField) && !isHistoryUpdate) {
return {
query: makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents),
convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.unnest(match.dbSchema), "Output", "Input", "Sync", false),
isInsertValues: false
};
} else {
return {
query: makeInsertValuesSetQuery(pgSchema, table, itemSchema, 500),
convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.preprocess(S$RescriptSchema.unnest(itemSchema), (function (param) {
return {
s: (function (prim) {
return prim.flat(1);
})
};
})), "Output", "Input", "Sync", false),
isInsertValues: true
};
}
}
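// Splits an array into consecutive chunks of at most chunkSize elements.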
function chunkArray(arr, chunkSize) {
var chunks = [];
var i = 0;
while(i < arr.length) {
var chunk = arr.slice(i, i + chunkSize | 0);
chunks.push(chunk);
i = i + chunkSize | 0;
};
return chunks;
}
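// Removes NUL bytes (\x00) from every string field in place; PostgreSQL text
// columns reject them.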
function removeInvalidUtf8InPlace(entities) {
entities.forEach(function (item) {
Utils.Dict.forEachWithKey(item, (function (value, key) {
if (typeof value === "string") {
item[key] = value.replaceAll("\x00", "");
return ;
}
}));
});
}
var pgErrorMessageSchema = S$RescriptSchema.object(function (s) {
return s.f("message", S$RescriptSchema.string);
});
var PgEncodingError = /* @__PURE__ */Caml_exceptions.create("PgStorage.PgEncodingError");
var setQueryCache = new WeakMap();
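// Writes a batch of rows for a table. The compiled query/converter pair is
// memoized per table in setQueryCache. Unnest queries run as one prepared
// statement; VALUES queries are split into 500-row chunks, generating a
// right-sized query for the final partial chunk. Conversion and database
// failures are rethrown as Persistence.StorageError.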
async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
if (items.length === 0) {
return ;
}
var cached = setQueryCache.get(table);
var data;
if (cached !== undefined) {
data = Caml_option.valFromOption(cached);
} else {
var newQuery = makeTableBatchSetQuery(pgSchema, table, itemSchema);
setQueryCache.set(table, newQuery);
data = newQuery;
}
try {
if (!data.isInsertValues) {
return await sql.unsafe(data.query, data.convertOrThrow(items), {prepare: true});
}
var chunks = chunkArray(items, 500);
var responses = [];
chunks.forEach(function (chunk) {
var chunkSize = chunk.length;
var isFullChunk = chunkSize === 500;
var response = sql.unsafe(isFullChunk ? data.query : makeInsertValuesSetQuery(pgSchema, table, itemSchema, chunkSize), data.convertOrThrow(chunk), {prepare: true});
responses.push(response);
});
await Promise.all(responses);
return ;
}
catch (raw_exn){
var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
if (exn.RE_EXN_ID === S$RescriptSchema.Raised) {
throw {
RE_EXN_ID: Persistence.StorageError,
message: "Failed to convert items for table \"" + table.tableName + "\"",
reason: exn,
Error: new Error()
};
}
throw {
RE_EXN_ID: Persistence.StorageError,
message: "Failed to insert items into table \"" + table.tableName + "\"",
reason: Utils.prettifyExn(exn),
Error: new Error()
};
}
}
function makeSchemaTableNamesQuery(pgSchema) {
return "SELECT table_name FROM information_schema.tables WHERE table_schema = '" + pgSchema + "';";
}
var cacheTablePrefixLength = Internal.cacheTablePrefix.length;
function makeSchemaCacheTableInfoQuery(pgSchema) {
return "SELECT \n t.table_name,\n " + getCacheRowCountFnName + "(t.table_name) as count\n FROM information_schema.tables t\n WHERE t.table_schema = '" + pgSchema + "' \n AND t.table_name LIKE '" + Internal.cacheTablePrefix + "%';";
}
var psqlExecState = {
contents: "Unknown"
};
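// Resolves the psql command line once and memoizes it via psqlExecState: it
// first probes a local "psql" binary, then falls back to psql inside the
// "envio-postgres" docker-compose service (always on port 5432), and returns an
// Error message when neither is available.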
async function getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort) {
var promise = psqlExecState.contents;
if (typeof promise === "object") {
if (promise.TAG === "Pending") {
return await promise._0;
} else {
return promise._0;
}
}
var promise$1 = new Promise((function (resolve, _reject) {
var binary = "psql";
Child_process.exec(binary + " --version", (function (error, param, param$1) {
if (error === null) {
return resolve({
TAG: "Ok",
_0: binary + " -h " + pgHost + " -p " + pgPort.toString() + " -U " + pgUser + " -d " + pgDatabase
});
}
var binary$1 = "docker-compose exec -T -u " + pgUser + " envio-postgres psql";
Child_process.exec(binary$1 + " --version", (function (error, param, param$1) {
if (error === null) {
return resolve({
TAG: "Ok",
_0: binary$1 + " -h " + pgHost + " -p " + (5432).toString() + " -U " + pgUser + " -d " + pgDatabase
});
} else {
return resolve({
TAG: "Error",
_0: "Please check if \"psql\" binary is installed or docker-compose is running for the local indexer."
});
}
}));
}));
}));
psqlExecState.contents = {
TAG: "Pending",
_0: promise$1
};
var result = await promise$1;
psqlExecState.contents = {
TAG: "Resolved",
_0: result
};
return result;
}
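// Constructs the PostgreSQL-backed storage object: schema initialization and
// resume, batched reads and writes, and TSV dump/restore of the effect cache
// via psql COPY.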
function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, isHasuraEnabled, onInitialize, onNewTables) {
var psqlExecOptions_env = Js_dict.fromArray([
[
"PGPASSWORD",
pgPassword
],
[
"PATH",
process.env.PATH
]
]);
var psqlExecOptions = {
env: psqlExecOptions_env
};
var cacheDirPath = Path.resolve("..", ".envio", "cache");
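// The schema counts as initialized when it already contains either the chains
// table or an "event_sync_state" table.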
var isInitialized = async function () {
var envioTables = await sql.unsafe("SELECT table_schema FROM information_schema.tables WHERE table_schema = '" + pgSchema + "' AND (table_name = 'event_sync_state' OR table_name = '" + InternalTable.Chains.table.tableName + "');");
return Utils.$$Array.notEmpty(envioTables);
};
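// Restores the effect cache. With withUpload set, any *.tsv dumps found in the
// cache directory are first copied into freshly created cache tables via
// psql COPY; the cache row counts are then read back from information_schema.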
var restoreEffectCache = async function (withUpload) {
if (withUpload) {
var nothingToUploadErrorMessage = "Nothing to upload.";
var match = await Promise.all([
$$Promise.$$catch(Fs.promises.readdir(cacheDirPath).then(function (e) {
return {
TAG: "Ok",
_0: e
};
}), (function (param) {
return Promise.resolve({
TAG: "Error",
_0: nothingToUploadErrorMessage
});
})),
getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort)
]);
var exit = 0;
var message;
var entries = match[0];
if (entries.TAG === "Ok") {
var psqlExec = match[1];
if (psqlExec.TAG === "Ok") {
var psqlExec$1 = psqlExec._0;
var cacheFiles = entries._0.filter(function (entry) {
return entry.endsWith(".tsv");
});
await Promise.all(cacheFiles.map(function (entry) {
var effectName = entry.slice(0, -4);
var table = Internal.makeCacheTable(effectName);
return sql.unsafe(makeCreateTableQuery(table, pgSchema, false)).then(function () {
var inputFile = Path.join(cacheDirPath, entry);
var command = psqlExec$1 + " -c 'COPY \"" + pgSchema + "\".\"" + table.tableName + "\" FROM STDIN WITH (FORMAT text, HEADER);' < " + inputFile;
return new Promise((function (resolve, reject) {
Child_process.exec(command, psqlExecOptions, (function (error, stdout, param) {
if (error === null) {
return resolve(stdout);
} else {
return reject(error);
}
}));
}));
});
}));
Logging.info("Successfully uploaded cache.");
} else {
message = match[1]._0;
exit = 1;
}
} else {
message = entries._0;
exit = 1;
}
if (exit === 1) {
if (message === nothingToUploadErrorMessage) {
Logging.info("No cache found to upload.");
} else {
Logging.error("Failed to upload cache, continuing without it. " + message);
}
}
}
var cacheTableInfo = await sql.unsafe(makeSchemaCacheTableInfoQuery(pgSchema));
if (withUpload && Utils.$$Array.notEmpty(cacheTableInfo) && onNewTables !== undefined) {
await onNewTables(cacheTableInfo.map(function (info) {
return info.table_name;
}));
}
var cache = {};
cacheTableInfo.forEach(function (param) {
var effectName = param.table_name.slice(cacheTablePrefixLength);
cache[effectName] = {
effectName: effectName,
count: param.count
};
});
return cache;
};
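// Runs migrations: refuses to touch a schema that already contains non-Envio
// tables, executes the initialization queries in a transaction, restores and
// uploads the effect cache, and returns a clean initial state for the
// configured chains.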
var initialize = async function (chainConfigsOpt, entitiesOpt, enumsOpt) {
var chainConfigs = chainConfigsOpt !== undefined ? chainConfigsOpt : [];
var entities = entitiesOpt !== undefined ? entitiesOpt : [];
var enums = enumsOpt !== undefined ? enumsOpt : [];
var schemaTableNames = await sql.unsafe(makeSchemaTableNamesQuery(pgSchema));
if (Utils.$$Array.notEmpty(schemaTableNames) && !schemaTableNames.some(function (table) {
return table.table_name === InternalTable.Chains.table.tableName ? true : table.table_name === "event_sync_state";
})) {
Js_exn.raiseError("Cannot run Envio migrations on PostgreSQL schema \"" + pgSchema + "\" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: \"pnpm envio local db-migrate down\"\n2. Or specify a different schema name by setting the \"ENVIO_PG_PUBLIC_SCHEMA\" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.");
}
var queries = makeInitializeTransaction(pgSchema, pgUser, isHasuraEnabled, chainConfigs, entities, enums, Utils.$$Array.isEmpty(schemaTableNames));
await sql.begin(function (sql) {
return Promise.all(queries.map(function (query) {
return sql.unsafe(query);
}));
});
var cache = await restoreEffectCache(true);
if (onInitialize !== undefined) {
await onInitialize();
}
return {
cleanRun: true,
cache: cache,
chains: chainConfigs.map(function (chainConfig) {
return {
id: chainConfig.id,
startBlock: chainConfig.startBlock,
endBlock: chainConfig.endBlock,
maxReorgDepth: chainConfig.maxReorgDepth,
progressBlockNumber: -1,
numEventsProcessed: 0,
firstEventBlockNumber: undefined,
timestampCaughtUpToHeadOrEndblock: undefined,
dynamicContracts: []
};
}),
checkpointId: InternalTable.Checkpoints.initialCheckpointId,
reorgCheckpoints: []
};
};
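// Loads rows by id (single-id and ANY(...) variants) and parses them with the
// provided rows schema; both steps rethrow as Persistence.StorageError.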
var loadByIdsOrThrow = async function (ids, table, rowsSchema) {
var rows;
try {
rows = await (
ids.length !== 1 ? sql.unsafe(makeLoadByIdsQuery(pgSchema, table.tableName), [ids], {prepare: true}) : sql.unsafe(makeLoadByIdQuery(pgSchema, table.tableName), ids, {prepare: true})
);
}
catch (raw_exn){
var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
throw {
RE_EXN_ID: Persistence.StorageError,
message: "Failed loading \"" + table.tableName + "\" from storage by ids",
reason: exn,
Error: new Error()
};
}
try {
return S$RescriptSchema.parseOrThrow(rows, rowsSchema);
}
catch (raw_exn$1){
var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn$1);
throw {
RE_EXN_ID: Persistence.StorageError,
message: "Failed to parse \"" + table.tableName + "\" loaded from storage by ids",
reason: exn$1,
Error: new Error()
};
}
};
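// Like loadByIdsOrThrow, but filters on an arbitrary field with the given SQL
// operator; the field value is serialized with its schema before being passed
// as $1.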
var loadByFieldOrThrow = async function (fieldName, fieldSchema, fieldValue, operator, table, rowsSchema) {
var params;
try {
params = [S$RescriptSchema.reverseConvertToJsonOrThrow(fieldValue, fieldSchema)];
}
catch (raw_exn){
var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
throw {
RE_EXN_ID: Persistence.StorageError,
message: "Failed loading \"" + table.tableName + "\" from storage by field \"" + fieldName + "\". Couldn't serialize provided value.",
reason: exn,
Error: new Error()
};
}
var rows;
try {
rows = await sql.unsafe(makeLoadByFieldQuery(pgSchema, table.tableName, fieldName, operator), params, {prepare: true});
}
catch (raw_exn$1){
var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn$1);
throw {
RE_EXN_ID: Persistence.StorageError,
message: "Failed loading \"" + table.tableName + "\" from storage by field \"" + fieldName + "\"",
reason: exn$1,
Error: new Error()
};
}
try {
return S$RescriptSchema.parseOrThrow(rows, rowsSchema);
}
catch (raw_exn$2){
var exn$2 = Caml_js_exceptions.internalToOCamlException(raw_exn$2);
throw {
RE_EXN_ID: Persistence.StorageError,
message: "Failed to parse \"" + table.tableName + "\" loaded from storage by field \"" + fieldName + "\"",
reason: exn$2,
Error: new Error()
};
}
};
var setOrThrow$1 = function (items, table, itemSchema) {
return setOrThrow(sql, items, table, itemSchema, pgSchema);
};
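// Writes effect-cache rows; when initialize is set, the cache table is created
// first and onNewTables (if provided) is notified.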
var setEffectCacheOrThrow = async function (effect, items, initialize) {
var match = effect.storageMeta;
var table = match.table;
if (initialize) {
await sql.unsafe(makeCreateTableQuery(table, pgSchema, false));
if (onNewTables !== undefined) {
await onNewTables([table.tableName]);
}
}
return await setOrThrow$1(items, table, match.itemSchema);
};
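// Dumps every non-empty effect-cache table to <cacheDir>/<name>.tsv using
// psql COPY ... TO STDOUT; failures are logged rather than thrown.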
var dumpEffectCache = async function () {
try {
var cacheTableInfo = (await sql.unsafe(makeSchemaCacheTableInfoQuery(pgSchema))).filter(function (i) {
return i.count > 0;
});
if (!Utils.$$Array.notEmpty(cacheTableInfo)) {
return ;
}
try {
await Fs.promises.access(cacheDirPath);
}
catch (exn){
await Fs.promises.mkdir(cacheDirPath, {
recursive: true
});
}
var psqlExec = await getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort);
if (psqlExec.TAG !== "Ok") {
return Logging.error("Failed to dump cache. " + psqlExec._0);
}
var psqlExec$1 = psqlExec._0;
Logging.info("Dumping cache: " + cacheTableInfo.map(function (param) {
return param.table_name + " (" + String(param.count) + " rows)";
}).join(", "));
var promises = cacheTableInfo.map(async function (param) {
var tableName = param.table_name;
var cacheName = tableName.slice(cacheTablePrefixLength);
var outputFile = Path.join(cacheDirPath, cacheName + ".tsv");
var command = psqlExec$1 + " -c 'COPY \"" + pgSchema + "\".\"" + tableName + "\" TO STDOUT WITH (FORMAT text, HEADER);' > " + outputFile;
return new Promise((function (resolve, reject) {
Child_process.exec(command, psqlExecOptions, (function (error, stdout, param) {
if (error === null) {
return resolve(stdout);
} else {
return reject(error);
}
}));
}));
});
await Promise.all(promises);
return Logging.info("Successfully dumped cache to " + cacheDirPath);
}
catch (raw_exn){
var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn);
return Logging.errorWithExn(Utils.prettifyExn(exn$1), "Failed to dump cache.");
}
};
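// Loads the resume state from an existing schema: the effect cache (without
// upload), per-chain progress rows, the latest committed checkpoint id and the
// reorg checkpoints.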
var resumeInitialState = async function () {
var match = await Promise.all([
restoreEffectCache(false),
InternalTable.Chains.getInitialState(sql, pgSchema).then(function (rawInitialStates) {
return Belt_Array.map(rawInitialStates, (function (rawInitialState) {
return {
id: rawInitialState.id,
startBlock: rawInitialState.startBlock,
endBlock: Caml_option.null_to_opt(rawInitialState.endBlock),
maxReorgDepth: rawInitialState.maxReorgDepth,
progressBlockNumber: rawInitialState.progressBlockNumber,
numEventsProcessed: rawInitialState.numEventsProcessed,
firstEventBlockNumber: Caml_option.null_to_opt(rawInitialState.firstEventBlockNumber),
timestampCaughtUpToHeadOrEndblock: Caml_option.null_to_opt(rawInitialState.timestampCaughtUpToHeadOrEndblock),
dynamicContracts: rawInitialState.dynamicContracts
};
}));
}),
sql.unsafe(InternalTable.Checkpoints.makeCommitedCheckpointIdQuery(pgSchema)),
sql.unsafe(InternalTable.Checkpoints.makeGetReorgCheckpointsQuery(pgSchema))
]);
return {
cleanRun: false,
cache: match[0],
chains: match[1],
checkpointId: match[2][0].id,
reorgCheckpoints: match[3]
};
};
return {
isInitialized: isInitialized,
initialize: initialize,
resumeInitialState: resumeInitialState,
loadByIdsOrThrow: loadByIdsOrThrow,
loadByFieldOrThrow: loadByFieldOrThrow,
setOrThrow: setOrThrow$1,
setEffectCacheOrThrow: setEffectCacheOrThrow,
dumpEffectCache: dumpEffectCache
};
}
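// Matches the 500-row chunk size hard-coded in makeTableBatchSetQuery and setOrThrow.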
var maxItemsPerQuery = 500;
exports.getCacheRowCountFnName = getCacheRowCountFnName;
exports.makeCreateIndexQuery = makeCreateIndexQuery;
exports.makeCreateTableIndicesQuery = makeCreateTableIndicesQuery;
exports.makeCreateTableQuery = makeCreateTableQuery;
exports.makeInitializeTransaction = makeInitializeTransaction;
exports.makeLoadByIdQuery = makeLoadByIdQuery;
exports.makeLoadByFieldQuery = makeLoadByFieldQuery;
exports.makeLoadByIdsQuery = makeLoadByIdsQuery;
exports.makeLoadAllQuery = makeLoadAllQuery;
exports.makeInsertUnnestSetQuery = makeInsertUnnestSetQuery;
exports.makeInsertValuesSetQuery = makeInsertValuesSetQuery;
exports.maxItemsPerQuery = maxItemsPerQuery;
exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
exports.chunkArray = chunkArray;
exports.removeInvalidUtf8InPlace = removeInvalidUtf8InPlace;
exports.pgErrorMessageSchema = pgErrorMessageSchema;
exports.PgEncodingError = PgEncodingError;
exports.setQueryCache = setQueryCache;
exports.setOrThrow = setOrThrow;
exports.makeSchemaTableNamesQuery = makeSchemaTableNamesQuery;
exports.cacheTablePrefixLength = cacheTablePrefixLength;
exports.makeSchemaCacheTableInfoQuery = makeSchemaCacheTableInfoQuery;
exports.getConnectedPsqlExec = getConnectedPsqlExec;
exports.make = make;
/* pgErrorMessageSchema Not a pure module */