// @backstage/plugin-catalog-backend
// Version: (unspecified)
// The Backstage backend plugin that provides the Backstage catalog
// 254 lines (248 loc) • 8.6 kB
// JavaScript
var catalogModel = require('@backstage/catalog-model');
var errors = require('@backstage/errors');
var lodash = require('lodash');
var conversion = require('./conversion.cjs.js');
var metrics = require('./metrics.cjs.js');
var checkLocationKeyConflict = require('./operations/refreshState/checkLocationKeyConflict.cjs.js');
var insertUnprocessedEntity = require('./operations/refreshState/insertUnprocessedEntity.cjs.js');
var updateUnprocessedEntity = require('./operations/refreshState/updateUnprocessedEntity.cjs.js');
var util = require('./util.cjs.js');
var luxon = require('luxon');
var constants = require('../constants.cjs.js');
/**
 * Normalizes a CommonJS module object for ES-module default-import
 * interop: if the value is already an object carrying a `default`
 * property it is returned as-is, otherwise it is wrapped so the value
 * becomes the `default` export.
 */
function _interopDefaultCompat(e) {
  const hasDefault = e && typeof e === 'object' && 'default' in e;
  if (hasDefault) {
    return e;
  }
  return { default: e };
}
var lodash__default = /*#__PURE__*/_interopDefaultCompat(lodash);
// Maximum rows per chunk for Knex batchInsert calls (relations,
// refresh_keys, refresh_state_references) below.
const BATCH_SIZE = 50;
/**
 * Knex-backed database layer used by the catalog processing loop.
 * Each public method takes an opaque handle (`txOpaque` / `maybeTx`)
 * that is in fact a Knex transaction (or instance) — see transaction().
 *
 * NOTE(review): this is compiled `.cjs.js` output, hence the flat
 * generator formatting.
 */
class DefaultProcessingDatabase {
/**
 * @param options - expected to carry `database` (Knex), `logger`,
 *   `refreshInterval` (function returning seconds) and an optional
 *   `eventBroker`; database metrics are registered eagerly here.
 */
constructor(options) {
this.options = options;
metrics.initDatabaseMetrics(options.database);
}
/**
 * Persists the outcome of processing one entity: its processed body,
 * result hash, errors, relations, deferred child entities and refresh
 * keys — all within the caller-supplied transaction.
 *
 * @throws errors.ConflictError when the guarded UPDATE matches no row,
 *   i.e. the row's location_key is set and differs from the given one
 *   (or the row no longer exists).
 * @returns the relation rows that were replaced, so callers can diff
 *   old vs. new relations.
 */
async updateProcessedEntity(txOpaque, options) {
const tx = txOpaque;
const {
id,
processedEntity,
resultHash,
errors: errors$1,
relations,
deferredEntities,
refreshKeys,
locationKey
} = options;
const configClient = tx.client.config.client;
// Guarded write: only touch the row if its location_key equals ours or
// is NULL (unclaimed). A zero affected-row count therefore signals a
// conflicting owner rather than "nothing to update".
const refreshResult = await tx("refresh_state").update({
processed_entity: JSON.stringify(processedEntity),
result_hash: resultHash,
errors: errors$1,
location_key: locationKey
}).where("entity_id", id).andWhere((inner) => {
if (!locationKey) {
return inner.whereNull("location_key");
}
return inner.where("location_key", locationKey).orWhereNull("location_key");
});
if (refreshResult === 0) {
throw new errors.ConflictError(
`Conflicting write of processing result for ${id} with location key '${locationKey}'`
);
}
const sourceEntityRef = catalogModel.stringifyEntityRef(processedEntity);
// Queue entities emitted by this entity for their own processing pass.
await this.addUnprocessedEntities(tx, {
entities: deferredEntities,
sourceEntityRef
});
let previousRelationRows;
// sqlite3/mysql cannot return rows from a DELETE, so read-then-delete
// there; other dialects (e.g. pg) delete and return old rows in one go.
if (configClient.includes("sqlite3") || configClient.includes("mysql")) {
previousRelationRows = await tx("relations").select("*").where({ originating_entity_id: id });
await tx("relations").where({ originating_entity_id: id }).delete();
} else {
previousRelationRows = await tx("relations").where({ originating_entity_id: id }).delete().returning("*");
}
const relationRows = relations.map(
({ source, target, type }) => ({
originating_entity_id: id,
source_entity_ref: catalogModel.stringifyEntityRef(source),
target_entity_ref: catalogModel.stringifyEntityRef(target),
type
})
);
// Deduplicate on (source, target, type) before insert — processors may
// emit the same relation more than once.
await tx.batchInsert(
"relations",
this.deduplicateRelations(relationRows),
BATCH_SIZE
);
// Refresh keys are fully replaced on every processing pass.
await tx("refresh_keys").where({ entity_id: id }).delete();
await tx.batchInsert(
"refresh_keys",
refreshKeys.map((k) => ({
entity_id: id,
key: util.generateTargetKey(k.key)
})),
BATCH_SIZE
);
return {
previous: {
relations: previousRelationRows
}
};
}
/**
 * Records processing errors and the result hash for an entity without
 * touching its processed body, relations or refresh keys. Note: no
 * location-key guard here, unlike updateProcessedEntity.
 */
async updateProcessedEntityErrors(txOpaque, options) {
const tx = txOpaque;
const { id, errors, resultHash } = options;
await tx("refresh_state").update({
errors,
result_hash: resultHash
}).where("entity_id", id);
}
/**
 * Replaces the cached processor state for an entity; a nullish state is
 * stored as an empty JSON object.
 */
async updateEntityCache(txOpaque, options) {
const tx = txOpaque;
const { id, state } = options;
await tx("refresh_state").update({ cache: JSON.stringify(state ?? {}) }).where("entity_id", id);
}
/**
 * Claims up to `request.processBatchSize` entities whose next_update_at
 * is due, and pushes their next_update_at forward by the configured
 * refresh interval so other workers skip them.
 *
 * @returns the claimed items with their unprocessed bodies and cached
 *   state parsed from JSON.
 */
async getProcessableEntities(maybeTx, request) {
const knex = maybeTx;
let itemsQuery = knex("refresh_state").select([
"entity_id",
"entity_ref",
"unprocessed_entity",
"result_hash",
"cache",
"errors",
"location_key",
"next_update_at"
]);
// Engines with row locks: lock the selected rows and skip ones already
// locked by concurrent workers, so two workers never claim the same rows.
if (["mysql", "mysql2", "pg"].includes(knex.client.config.client)) {
itemsQuery = itemsQuery.forUpdate().skipLocked();
}
const items = await itemsQuery.where("next_update_at", "<=", knex.fn.now()).limit(request.processBatchSize).orderBy("next_update_at", "asc");
const interval = this.options.refreshInterval();
// Dialect-specific "now + N seconds" SQL expression. NOTE(review): the
// mysql/pg branches interpolate refreshInterval directly into raw SQL —
// safe only while refreshInterval() yields a number; confirm upstream.
const nextUpdateAt = (refreshInterval) => {
if (knex.client.config.client.includes("sqlite3")) {
return knex.raw(`datetime('now', ?)`, [`${refreshInterval} seconds`]);
} else if (knex.client.config.client.includes("mysql")) {
return knex.raw(`now() + interval ${refreshInterval} second`);
}
return knex.raw(`now() + interval '${refreshInterval} seconds'`);
};
// Defer the claimed rows' next run; with an empty batch the whereIn
// matches nothing and this is a no-op.
await knex("refresh_state").whereIn(
"entity_ref",
items.map((i) => i.entity_ref)
).update({
next_update_at: nextUpdateAt(interval)
});
return {
items: items.map(
(i) => ({
id: i.entity_id,
entityRef: i.entity_ref,
unprocessedEntity: JSON.parse(i.unprocessed_entity),
resultHash: i.result_hash || "",
nextUpdateAt: conversion.timestampToDateTime(i.next_update_at),
state: i.cache ? JSON.parse(i.cache) : void 0,
errors: i.errors,
locationKey: i.location_key
})
)
};
}
/**
 * Returns the entity refs that are sources of (i.e. emitted) any of the
 * given target refs, per the refresh_state_references table. Null/empty
 * source refs are filtered out.
 */
async listParents(txOpaque, options) {
const tx = txOpaque;
const rows = await tx(
"refresh_state_references"
).whereIn("target_entity_ref", options.entityRefs).select();
const entityRefs = rows.map((r) => r.source_entity_ref).filter(Boolean);
return { entityRefs };
}
/**
 * Runs `fn` inside a database transaction and returns its result.
 * Deliberate rollbacks do not reject (doNotRejectOnRollback below);
 * other failures are logged at debug level and rethrown through
 * conversion.rethrowError.
 */
async transaction(fn) {
try {
let result = void 0;
await this.options.database.transaction(
async (tx) => {
result = await fn(tx);
},
{
// If we explicitly trigger a rollback, don't fail.
doNotRejectOnRollback: true
}
);
return result;
} catch (e) {
this.options.logger.debug(`Error during transaction, ${e}`);
throw conversion.rethrowError(e);
}
}
// Drops duplicate relation rows, keyed on (source, target, type);
// first occurrence wins (lodash uniqBy semantics).
deduplicateRelations(rows) {
return lodash__default.default.uniqBy(
rows,
(r) => `${r.source_entity_ref}:${r.target_entity_ref}:${r.type}`
);
}
/**
 * Add a set of deferred entities for processing.
 * The entities will be added at the front of the processing queue.
 *
 * Per entity: try an in-place update of its refresh_state row, else
 * insert a fresh row, else check for a location-key conflict (same
 * entityRef already claimed under a different location key); conflicts
 * are logged and, when an event broker is configured and a locationKey
 * is present, published on the catalog conflicts topic.
 */
async addUnprocessedEntities(txOpaque, options) {
const tx = txOpaque;
const stateReferences = new Array();
for (const { entity, locationKey } of options.entities) {
const entityRef = catalogModel.stringifyEntityRef(entity);
const hash = util.generateStableHash(entity);
const updated = await updateUnprocessedEntity.updateUnprocessedEntity({
tx,
entity,
hash,
locationKey
});
if (updated) {
stateReferences.push(entityRef);
continue;
}
const inserted = await insertUnprocessedEntity.insertUnprocessedEntity({
tx,
entity,
hash,
locationKey,
logger: this.options.logger
});
if (inserted) {
stateReferences.push(entityRef);
continue;
}
const conflictingKey = await checkLocationKeyConflict.checkLocationKeyConflict({
tx,
entityRef,
locationKey
});
if (conflictingKey) {
this.options.logger.warn(
`Detected conflicting entityRef ${entityRef} already referenced by ${conflictingKey} and now also ${locationKey}`
);
if (this.options.eventBroker && locationKey) {
const eventParams = {
topic: constants.CATALOG_CONFLICTS_TOPIC,
eventPayload: {
unprocessedEntity: entity,
entityRef,
newLocationKey: locationKey,
existingLocationKey: conflictingKey,
lastConflictAt: luxon.DateTime.now().toISO()
}
};
await this.options.eventBroker?.publish(eventParams);
}
}
}
// Rebuild reference edges: delete every edge previously emitted by this
// source AND any edge (from any source) targeting one of the touched
// refs, then insert fresh source→target edges. NOTE(review): the
// orWhereIn clause re-parents the targets to this source — presumably
// intentional ownership transfer; verify against schema expectations.
await tx("refresh_state_references").where({ source_entity_ref: options.sourceEntityRef }).orWhereIn("target_entity_ref", stateReferences).delete();
await tx.batchInsert(
"refresh_state_references",
stateReferences.map((entityRef) => ({
source_entity_ref: options.sourceEntityRef,
target_entity_ref: entityRef
})),
BATCH_SIZE
);
}
}
// Sole runtime export of this compiled module.
exports.DefaultProcessingDatabase = DefaultProcessingDatabase;
//# sourceMappingURL=DefaultProcessingDatabase.cjs.js.map
;