@webiny/api-headless-cms-ddb-es
DynamoDB and Elasticsearch storage operations plugin for Headless CMS API.
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault").default;
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.createEntriesStorageOperations = void 0;
var _error = _interopRequireDefault(require("@webiny/error"));
var _types = require("@webiny/api-headless-cms/types");
var _helpers = require("../../helpers");
var _configurations = require("../../configurations");
var _dbDynamodb = require("@webiny/db-dynamodb");
var _dataLoaders = require("./dataLoaders");
var _keys = require("./keys");
var _apiElasticsearch = require("@webiny/api-elasticsearch");
var _utils = require("@webiny/utils");
var _body = require("./elasticsearch/body");
var _logIgnoredEsResponseError = require("./elasticsearch/logIgnoredEsResponseError");
var _shouldIgnoreEsResponseError = require("./elasticsearch/shouldIgnoreEsResponseError");
var _recordType = require("./recordType");
var _apiHeadlessCms = require("@webiny/api-headless-cms");
var _transformations = require("./transformations");
var _convertEntryKeys = require("./transformations/convertEntryKeys");
var _constants = require("@webiny/api-headless-cms/constants");
const convertToStorageEntry = params => {
const {
model,
storageEntry
} = params;
const values = model.convertValueKeyToStorage({
fields: model.fields,
values: storageEntry.values
});
return {
...storageEntry,
values
};
};
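/**
* Factory for the Headless CMS entries storage operations. Entries live in a primary
* DynamoDB table and are mirrored into a DynamoDB "Elasticsearch" table, whose records
* are streamed into the actual Elasticsearch index. A minimal wiring sketch follows
* (hypothetical setup; how `entity`, `esEntity`, `elasticsearch` and `plugins` are
* constructed depends on the surrounding project):
*
*   const entriesStorageOperations = createEntriesStorageOperations({
*       entity,        // primary DynamoDB table entity
*       esEntity,      // DynamoDB Elasticsearch table entity
*       elasticsearch, // Elasticsearch client, used by `list` and `get`
*       plugins        // plugins container, used to resolve the storage operations CMS model plugin
*   });
*/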
const createEntriesStorageOperations = params => {
const {
entity,
esEntity,
elasticsearch,
plugins
} = params;
let storageOperationsCmsModelPlugin;
const getStorageOperationsCmsModelPlugin = () => {
if (storageOperationsCmsModelPlugin) {
return storageOperationsCmsModelPlugin;
}
storageOperationsCmsModelPlugin = plugins.oneByType(_apiHeadlessCms.StorageOperationsCmsModelPlugin.type);
return storageOperationsCmsModelPlugin;
};
const getStorageOperationsModel = model => {
const plugin = getStorageOperationsCmsModelPlugin();
return plugin.getModel(model);
};
const dataLoaders = new _dataLoaders.DataLoadersHandler({
entity
});
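/**
* Create a new entry. Writes the revision and latest ("L") records to the primary
* DynamoDB table, plus a published ("P") record when the entry is created in the
* "published" status, and mirrors the latest/published data into the DynamoDB
* Elasticsearch table.
*/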
const create = async (initialModel, params) => {
const {
entry: initialEntry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const isPublished = initialEntry.status === "published";
const locked = isPublished ? true : initialEntry.locked;
initialEntry.locked = locked;
initialStorageEntry.locked = locked;
const transformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: initialEntry,
storageEntry: initialStorageEntry
});
const {
entry,
storageEntry
} = transformer.transformEntryKeys();
const esEntry = transformer.transformToIndex();
const {
index: esIndex
} = _configurations.configurations.es({
model
});
const revisionKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)(entry)
};
const latestKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createLatestSortKey)()
};
const publishedKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createPublishedSortKey)()
};
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
put: [{
...storageEntry,
locked,
...revisionKeys,
TYPE: (0, _recordType.createRecordType)()
}, {
...storageEntry,
locked,
...latestKeys,
TYPE: (0, _recordType.createLatestRecordType)()
}]
});
if (isPublished) {
entityBatch.put({
...storageEntry,
locked,
...publishedKeys,
TYPE: (0, _recordType.createPublishedRecordType)()
});
}
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not insert entry data into the DynamoDB table.", ex.code || "CREATE_ENTRY_ERROR", {
error: ex,
entry,
storageEntry
});
}
const esLatestData = await transformer.getElasticsearchLatestEntryData();
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity,
put: [{
...latestKeys,
index: esIndex,
data: esLatestData
}]
});
if (isPublished) {
const esPublishedData = await transformer.getElasticsearchPublishedEntryData();
elasticsearchEntityBatch.put({
...publishedKeys,
index: esIndex,
data: esPublishedData
});
}
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not insert entry data into the Elasticsearch DynamoDB table.", ex.code || "CREATE_ES_ENTRY_ERROR", {
error: ex,
entry,
esEntry
});
}
return initialStorageEntry;
};
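/**
* Create a new revision from an existing entry. Writes the new revision and latest
* records; if the new revision is published, it also writes the published records and
* marks the previously published revision (if any) as unpublished.
*/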
const createRevisionFrom = async (initialModel, params) => {
const {
entry: initialEntry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const transformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: initialEntry,
storageEntry: initialStorageEntry
});
const {
entry,
storageEntry
} = transformer.transformEntryKeys();
const revisionKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)(entry)
};
const latestKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createLatestSortKey)()
};
const publishedKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createPublishedSortKey)()
};
// We'll need this flag below.
const isPublished = entry.status === "published";
const esLatestData = await transformer.getElasticsearchLatestEntryData();
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
put: [{
...storageEntry,
TYPE: (0, _recordType.createRecordType)(),
...revisionKeys
}, {
...storageEntry,
TYPE: (0, _recordType.createLatestRecordType)(),
...latestKeys
}]
});
if (isPublished) {
entityBatch.put({
...storageEntry,
TYPE: (0, _recordType.createPublishedRecordType)(),
...publishedKeys
});
// Unpublish previously published revision (if any).
const [publishedRevisionStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [entry.id]
});
if (publishedRevisionStorageEntry) {
entityBatch.put({
...publishedRevisionStorageEntry,
PK: (0, _keys.createPartitionKey)({
id: publishedRevisionStorageEntry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)(publishedRevisionStorageEntry),
TYPE: (0, _recordType.createRecordType)(),
status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED
});
}
}
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not create revision from given entry in the DynamoDB table.", ex.code || "CREATE_REVISION_ERROR", {
error: ex,
entry,
storageEntry
});
}
const {
index: esIndex
} = _configurations.configurations.es({
model
});
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity,
put: [{
...latestKeys,
index: esIndex,
data: esLatestData
}]
});
if (isPublished) {
const esPublishedData = await transformer.getElasticsearchPublishedEntryData();
elasticsearchEntityBatch.put({
...publishedKeys,
index: esIndex,
data: esPublishedData
});
}
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not update latest entry in the DynamoDB Elasticsearch table.", ex.code || "CREATE_REVISION_ERROR", {
error: ex,
entry
});
}
/**
* No modifications were made to the created entry, so we just return the initial data.
*/
return initialStorageEntry;
};
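/**
* Update an existing revision. The revision record is always rewritten; the latest
* records are fully rewritten only when the revision being updated is the latest one.
* Otherwise, only the entry-level meta fields are propagated to the latest records in
* both tables.
*/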
const update = async (initialModel, params) => {
const {
entry: initialEntry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const transformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: initialEntry,
storageEntry: initialStorageEntry
});
const {
entry,
storageEntry
} = transformer.transformEntryKeys();
const isPublished = entry.status === "published";
const locked = isPublished ? true : entry.locked;
const revisionKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)(entry)
};
const latestKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createLatestSortKey)()
};
const publishedKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createPublishedSortKey)()
};
/**
* We need the latest entry to check if it needs to be updated.
*/
const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
model,
ids: [entry.id]
});
const [publishedStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [entry.id]
});
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
put: [{
...storageEntry,
locked,
...revisionKeys,
TYPE: (0, _recordType.createRecordType)()
}]
});
if (isPublished) {
entityBatch.put({
...storageEntry,
locked,
...publishedKeys,
TYPE: (0, _recordType.createPublishedRecordType)()
});
}
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity
});
const {
index: esIndex
} = _configurations.configurations.es({
model
});
/**
* If the latest entry is the one being updated, we need to create new latest entry records.
*/
if (latestStorageEntry) {
const updatingLatestRevision = latestStorageEntry.id === entry.id;
if (updatingLatestRevision) {
/**
* First we update the regular DynamoDB table.
*/
entityBatch.put({
...storageEntry,
...latestKeys,
TYPE: (0, _recordType.createLatestRecordType)()
});
/**
* Then update the Elasticsearch table to propagate the changes to Elasticsearch.
*/
const elasticsearchLatestData = await transformer.getElasticsearchLatestEntryData();
elasticsearchEntityBatch.put({
...latestKeys,
index: esIndex,
data: elasticsearchLatestData
});
} else {
/**
* If we're not updating the latest revision, we still want to update the latest
* revision's entry-level meta fields to match the current revision's entry-level
* meta fields.
*/
const updatedEntryLevelMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isEntryLevelEntryMetaField);
const updatedLatestStorageEntry = {
...latestStorageEntry,
...latestKeys,
...updatedEntryLevelMetaFields
};
/**
* First we update the regular DynamoDB table. Two updates are needed:
* - one for the actual revision record
* - one for the latest record
*/
entityBatch.put({
...updatedLatestStorageEntry,
PK: (0, _keys.createPartitionKey)({
id: latestStorageEntry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)(latestStorageEntry),
TYPE: (0, _recordType.createRecordType)()
});
entityBatch.put({
...updatedLatestStorageEntry,
TYPE: (0, _recordType.createLatestRecordType)()
});
/**
* Update the Elasticsearch table to propagate the changes to Elasticsearch.
*/
const latestEsEntry = await (0, _dbDynamodb.getClean)({
entity: esEntity,
keys: latestKeys
});
if (latestEsEntry) {
const latestEsEntryDataDecompressed = await (0, _apiElasticsearch.decompress)(plugins, latestEsEntry.data);
const updatedLatestEntry = await (0, _apiElasticsearch.compress)(plugins, {
...latestEsEntryDataDecompressed,
...updatedEntryLevelMetaFields
});
elasticsearchEntityBatch.put({
...latestKeys,
index: esIndex,
data: updatedLatestEntry
});
}
}
}
if (isPublished && publishedStorageEntry?.id === entry.id) {
const elasticsearchPublishedData = await transformer.getElasticsearchPublishedEntryData();
elasticsearchEntityBatch.put({
...publishedKeys,
index: esIndex,
data: elasticsearchPublishedData
});
}
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not update entry DynamoDB records.", ex.code || "UPDATE_ENTRY_ERROR", {
error: ex,
entry,
storageEntry
});
}
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not update entry DynamoDB Elasticsearch records.", ex.code || "UPDATE_ES_ENTRY_ERROR", {
error: ex,
entry
});
}
return initialStorageEntry;
};
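/**
* Move an entry into another folder. Updates `location.folderId` on every record under
* the entry's partition key, then patches the latest and published records in the
* DynamoDB Elasticsearch table (decompressing and recompressing their data).
*/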
const move = async (initialModel, id, folderId) => {
const model = getStorageOperationsModel(initialModel);
const partitionKey = (0, _keys.createPartitionKey)({
id,
locale: model.locale,
tenant: model.tenant
});
/**
* First we need to fetch all the records in the regular DynamoDB table.
*/
const queryAllParams = {
entity,
partitionKey,
options: {
gte: " "
}
};
const latestSortKey = (0, _keys.createLatestSortKey)();
const publishedSortKey = (0, _keys.createPublishedSortKey)();
const records = await (0, _dbDynamodb.queryAll)(queryAllParams);
/**
* Then update the folderId in each record and prepare it to be stored.
*/
let latestRecord = undefined;
let publishedRecord = undefined;
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity
});
for (const record of records) {
entityBatch.put({
...record,
location: {
...record?.location,
folderId
}
});
/**
* We need to get the published and latest records, so we can update Elasticsearch.
*/
if (record.SK === publishedSortKey) {
publishedRecord = record;
} else if (record.SK === latestSortKey) {
latestRecord = record;
}
}
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not move all entry records from in the DynamoDB table.", ex.code || "MOVE_ENTRY_ERROR", {
error: ex,
id
});
}
const esGetItems = [];
if (publishedRecord) {
esGetItems.push(esEntity.getBatch({
PK: partitionKey,
SK: publishedSortKey
}));
}
if (latestRecord) {
esGetItems.push(esEntity.getBatch({
PK: partitionKey,
SK: latestSortKey
}));
}
if (esGetItems.length === 0) {
return;
}
const esRecords = await (0, _dbDynamodb.batchReadAll)({
table: esEntity.table,
items: esGetItems
});
const esItems = (await Promise.all(esRecords.map(async record => {
if (!record) {
return null;
}
return {
...record,
data: await (0, _apiElasticsearch.decompress)(plugins, record.data)
};
}))).filter(Boolean);
if (esItems.length === 0) {
return;
}
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity,
put: await Promise.all(esItems.map(async item => {
return {
...item,
data: await (0, _apiElasticsearch.compress)(plugins, {
...item.data,
location: {
...item.data?.location,
folderId
}
})
};
}))
});
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not move entry DynamoDB Elasticsearch records.", ex.code || "MOVE_ES_ENTRY_ERROR", {
error: ex,
partitionKey
});
}
};
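/**
* Move an entry into the trash bin (soft delete). Flags every record of the entry with
* the `deleted` meta fields, `wbyDeleted`, the bin location and `binOriginalFolderId`,
* and propagates the same changes to the latest/published records in the DynamoDB
* Elasticsearch table.
*/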
const moveToBin = async (initialModel, params) => {
const {
entry: initialEntry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const transformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: initialEntry,
storageEntry: initialStorageEntry
});
const {
entry,
storageEntry
} = transformer.transformEntryKeys();
const partitionKey = (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
});
/**
* First we need to fetch all the records in the regular DynamoDB table.
*/
const queryAllParams = {
entity,
partitionKey,
options: {
gte: " "
}
};
const latestSortKey = (0, _keys.createLatestSortKey)();
const publishedSortKey = (0, _keys.createPublishedSortKey)();
const records = await (0, _dbDynamodb.queryAll)(queryAllParams);
/**
* Let's pick the `deleted` meta fields from the entry.
*/
const updatedEntryMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isDeletedEntryMetaField);
/**
* Then update all the records with data received.
*/
let latestRecord = undefined;
let publishedRecord = undefined;
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity
});
for (const record of records) {
entityBatch.put({
...record,
...updatedEntryMetaFields,
wbyDeleted: storageEntry.wbyDeleted,
location: storageEntry.location,
binOriginalFolderId: storageEntry.binOriginalFolderId
});
/**
* We need to get the published and latest records, so we can update Elasticsearch.
*/
if (record.SK === publishedSortKey) {
publishedRecord = record;
} else if (record.SK === latestSortKey) {
latestRecord = record;
}
}
/**
* We write the records back to the primary DynamoDB table.
*/
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could mark as deleted all entry records from in the DynamoDB table.", ex.code || "MOVE_ENTRY_TO_BIN_ERROR", {
error: ex,
entry,
storageEntry
});
}
/**
* We need to get the published and latest records from Elasticsearch.
*/
const esGetItems = [];
if (publishedRecord) {
esGetItems.push(esEntity.getBatch({
PK: partitionKey,
SK: publishedSortKey
}));
}
if (latestRecord) {
esGetItems.push(esEntity.getBatch({
PK: partitionKey,
SK: latestSortKey
}));
}
if (esGetItems.length === 0) {
return;
}
const esRecords = await (0, _dbDynamodb.batchReadAll)({
table: esEntity.table,
items: esGetItems
});
const esItems = (await Promise.all(esRecords.map(async record => {
if (!record) {
return null;
}
return {
...record,
data: await (0, _apiElasticsearch.decompress)(plugins, record.data)
};
}))).filter(Boolean);
if (esItems.length === 0) {
return;
}
/**
* We update all ES records with data received.
*/
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity
});
for (const item of esItems) {
elasticsearchEntityBatch.put({
...item,
data: await (0, _apiElasticsearch.compress)(plugins, {
...item.data,
...updatedEntryMetaFields,
wbyDeleted: entry.wbyDeleted,
location: entry.location,
binOriginalFolderId: entry.binOriginalFolderId
})
});
}
/**
* We write the records back to the primary DynamoDB Elasticsearch table.
*/
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not mark as deleted entry records from DynamoDB Elasticsearch table.", ex.code || "MOVE_ENTRY_TO_BIN_ERROR", {
error: ex,
entry,
storageEntry
});
}
};
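/**
* Restore an entry from the trash bin. The inverse of `moveToBin`: applies the
* `restored` meta fields and the restored location to all records of the entry in both
* tables.
*/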
const restoreFromBin = async (initialModel, params) => {
const {
entry: initialEntry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const transformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: initialEntry,
storageEntry: initialStorageEntry
});
const {
entry,
storageEntry
} = transformer.transformEntryKeys();
/**
* Let's pick the `restored` meta fields from the entry.
*/
const updatedEntryMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isRestoredEntryMetaField);
const partitionKey = (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
});
/**
* First we need to fetch all the records in the regular DynamoDB table.
*/
const queryAllParams = {
entity,
partitionKey,
options: {
gte: " "
}
};
const latestSortKey = (0, _keys.createLatestSortKey)();
const publishedSortKey = (0, _keys.createPublishedSortKey)();
const records = await (0, _dbDynamodb.queryAll)(queryAllParams);
/**
* Then update all the records with data received.
*/
let latestRecord = undefined;
let publishedRecord = undefined;
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity
});
for (const record of records) {
entityBatch.put({
...record,
...updatedEntryMetaFields,
wbyDeleted: storageEntry.wbyDeleted,
location: storageEntry.location,
binOriginalFolderId: storageEntry.binOriginalFolderId
});
/**
* We need to get the published and latest records, so we can update Elasticsearch.
*/
if (record.SK === publishedSortKey) {
publishedRecord = record;
} else if (record.SK === latestSortKey) {
latestRecord = record;
}
}
/**
* We write the records back to the primary DynamoDB table.
*/
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not restore all entry records from in the DynamoDB table.", ex.code || "RESTORE_ENTRY_ERROR", {
error: ex,
entry,
storageEntry
});
}
/**
* We need to get the published and latest records from Elasticsearch.
*/
const esGetItems = [];
if (publishedRecord) {
esGetItems.push(esEntity.getBatch({
PK: partitionKey,
SK: publishedSortKey
}));
}
if (latestRecord) {
esGetItems.push(esEntity.getBatch({
PK: partitionKey,
SK: latestSortKey
}));
}
const esRecords = await (0, _dbDynamodb.batchReadAll)({
table: esEntity.table,
items: esGetItems
});
const esItems = (await Promise.all(esRecords.map(async record => {
if (!record) {
return null;
}
return {
...record,
data: await (0, _apiElasticsearch.decompress)(plugins, record.data)
};
}))).filter(Boolean);
if (esItems.length === 0) {
return initialStorageEntry;
}
/**
* We update all ES records with data received.
*/
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity
});
for (const item of esItems) {
elasticsearchEntityBatch.put({
...item,
data: await (0, _apiElasticsearch.compress)(plugins, {
...item.data,
...updatedEntryMetaFields,
wbyDeleted: entry.wbyDeleted,
location: entry.location,
binOriginalFolderId: entry.binOriginalFolderId
})
});
}
/**
* We write the records back to the primary DynamoDB Elasticsearch table.
*/
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not restore entry records from DynamoDB Elasticsearch table.", ex.code || "RESTORE_ENTRY_ERROR", {
error: ex,
entry,
storageEntry
});
}
return initialStorageEntry;
};
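/**
* Hard-delete an entry: removes every record under the entry's partition key from both
* the primary DynamoDB table and the DynamoDB Elasticsearch table.
*/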
const deleteEntry = async (initialModel, params) => {
const {
entry
} = params;
const id = entry.id || entry.entryId;
const model = getStorageOperationsModel(initialModel);
const partitionKey = (0, _keys.createPartitionKey)({
id,
locale: model.locale,
tenant: model.tenant
});
const items = await (0, _dbDynamodb.queryAll)({
entity,
partitionKey,
options: {
gte: " "
}
});
const esItems = await (0, _dbDynamodb.queryAll)({
entity: esEntity,
partitionKey,
options: {
gte: " "
}
});
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
delete: items.map(item => {
return {
PK: item.PK,
SK: item.SK
};
})
});
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity,
delete: esItems.map(item => {
return {
PK: item.PK,
SK: item.SK
};
})
});
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not destroy entry records from DynamoDB table.", ex.code || "DELETE_ENTRY_ERROR", {
error: ex,
id
});
}
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not destroy entry records from DynamoDB Elasticsearch table.", ex.code || "DELETE_ENTRY_ERROR", {
error: ex,
id
});
}
};
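/**
* Delete a single revision. Removes the revision record (and the published records, if
* this revision is the published one) and, when a new latest revision is provided,
* rewrites the latest records in both tables to point to it.
*/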
const deleteRevision = async (initialModel, params) => {
const {
entry,
latestEntry,
latestStorageEntry: initialLatestStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const partitionKey = (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
});
const {
index
} = _configurations.configurations.es({
model
});
/**
* We need the published entry so we can delete it if necessary.
*/
const [publishedStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [entry.id]
});
/**
* We need to delete all existing records of the given entry revision.
*/
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
delete: [{
PK: partitionKey,
SK: (0, _keys.createRevisionSortKey)(entry)
}]
});
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity
});
/**
* If the revision we are deleting is also the published one, we need to delete those records as well.
*/
if (publishedStorageEntry?.id === entry.id) {
entityBatch.delete({
PK: partitionKey,
SK: (0, _keys.createPublishedSortKey)()
});
elasticsearchEntityBatch.delete({
PK: partitionKey,
SK: (0, _keys.createPublishedSortKey)()
});
}
if (latestEntry && initialLatestStorageEntry) {
const latestStorageEntry = convertToStorageEntry({
storageEntry: initialLatestStorageEntry,
model
});
/**
* In the end we need to set the new latest entry.
*/
entityBatch.put({
...latestStorageEntry,
PK: partitionKey,
SK: (0, _keys.createLatestSortKey)(),
TYPE: (0, _recordType.createLatestRecordType)()
});
/**
* Also perform an update on the actual revision. This is needed
* because of updates on the entry-level meta fields.
*/
entityBatch.put({
...latestStorageEntry,
PK: (0, _keys.createPartitionKey)({
id: initialLatestStorageEntry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)(initialLatestStorageEntry),
TYPE: (0, _recordType.createRecordType)()
});
const latestTransformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: latestEntry,
storageEntry: initialLatestStorageEntry
});
const esLatestData = await latestTransformer.getElasticsearchLatestEntryData();
elasticsearchEntityBatch.put({
PK: partitionKey,
SK: (0, _keys.createLatestSortKey)(),
index,
data: esLatestData
});
}
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not batch write entry records to DynamoDB table.", ex.code || "DELETE_REVISION_ERROR", {
error: ex,
entry,
latestEntry,
initialLatestStorageEntry
});
}
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not batch write entry records to DynamoDB Elasticsearch table.", ex.code || "DELETE_REVISION_ERROR", {
error: ex,
entry,
latestEntry,
initialLatestStorageEntry
});
}
};
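/**
* Delete multiple entries at once: removes the latest ("L") and published ("P")
* records plus every revision record for each of the given entry IDs, in both tables.
*/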
const deleteMultipleEntries = async (initialModel, params) => {
const {
entries
} = params;
const model = getStorageOperationsModel(initialModel);
/**
* First we need all the revisions of the entries we want to delete.
*/
const revisions = await dataLoaders.getAllEntryRevisions({
model,
ids: entries
});
/**
* Then we need to construct the queries for all the revisions and entries.
*/
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity
});
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity
});
for (const id of entries) {
/**
* Latest item.
*/
entityBatch.delete({
PK: (0, _keys.createPartitionKey)({
id,
locale: model.locale,
tenant: model.tenant
}),
SK: "L"
});
elasticsearchEntityBatch.delete({
PK: (0, _keys.createPartitionKey)({
id,
locale: model.locale,
tenant: model.tenant
}),
SK: "L"
});
/**
* Published item.
*/
entityBatch.delete({
PK: (0, _keys.createPartitionKey)({
id,
locale: model.locale,
tenant: model.tenant
}),
SK: "P"
});
elasticsearchEntityBatch.delete({
PK: (0, _keys.createPartitionKey)({
id,
locale: model.locale,
tenant: model.tenant
}),
SK: "P"
});
}
/**
* Exact revisions of all the entries
*/
for (const revision of revisions) {
entityBatch.delete({
PK: (0, _keys.createPartitionKey)({
id: revision.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)({
version: revision.version
})
});
}
await entityBatch.execute();
await elasticsearchEntityBatch.execute();
};
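/**
* List entries by querying Elasticsearch directly. The search body is built from the
* given params and fetches more hits than `limit` so `hasMoreItems` can be detected;
* pagination uses a `search_after`-based cursor.
*/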
const list = async (initialModel, params) => {
const model = getStorageOperationsModel(initialModel);
const limit = (0, _apiElasticsearch.createLimit)(params.limit, 50);
const {
index
} = _configurations.configurations.es({
model
});
const body = (0, _body.createElasticsearchBody)({
model,
params: {
...params,
limit,
after: (0, _apiElasticsearch.decodeCursor)(params.after)
},
plugins
});
let response;
try {
response = await elasticsearch.search({
index,
body
});
} catch (error) {
/**
* We will silently ignore the `index_not_found_exception` error and return an empty result set.
* This is because the index might not exist yet, and we don't want to throw an error.
*/
if ((0, _shouldIgnoreEsResponseError.shouldIgnoreEsResponseError)(error)) {
(0, _logIgnoredEsResponseError.logIgnoredEsResponseError)({
error,
model,
indexName: index
});
return {
hasMoreItems: false,
totalCount: 0,
cursor: null,
items: []
};
}
throw new _error.default(error.message, error.code || "ELASTICSEARCH_ERROR", {
error,
index,
body,
model
});
}
const {
hits,
total
} = response?.body?.hits || {};
const items = (0, _helpers.extractEntriesFromIndex)({
plugins,
model,
entries: hits.map(item => item._source)
}).map(item => {
return (0, _convertEntryKeys.convertEntryKeysFromStorage)({
model,
entry: item
});
});
const hasMoreItems = items.length > limit;
if (hasMoreItems) {
/**
* Remove the last item from the results; we don't want to include it.
*/
items.pop();
}
/**
* Cursor is the `sort` value of the last item in the array.
* https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#search-after
*/
const cursor = items.length > 0 ? (0, _apiElasticsearch.encodeCursor)(hits[items.length - 1].sort) || null : null;
return {
hasMoreItems,
totalCount: total.value,
cursor,
items
};
};
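/**
* Get a single entry: delegates to `list` with `limit: 1` and returns the first match,
* or `null` when there is none.
*/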
const get = async (initialModel, params) => {
const model = getStorageOperationsModel(initialModel);
const {
items
} = await list(model, {
...params,
limit: 1
});
return items.shift() || null;
};
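/**
* Publish a revision. Rewrites the revision ("REV#") and published ("P") records,
* updates the latest ("L") records (fully when publishing the latest revision,
* otherwise only the entry-level meta fields and status), and unpublishes a previously
* published revision when a different one existed.
*/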
const publish = async (initialModel, params) => {
const {
entry: initialEntry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const transformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: initialEntry,
storageEntry: initialStorageEntry
});
const {
entry,
storageEntry
} = transformer.transformEntryKeys();
const revisionKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createRevisionSortKey)(entry)
};
const latestKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createLatestSortKey)()
};
const publishedKeys = {
PK: (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: (0, _keys.createPublishedSortKey)()
};
let latestEsEntry = null;
try {
latestEsEntry = await (0, _dbDynamodb.getClean)({
entity: esEntity,
keys: latestKeys
});
} catch (ex) {
throw new _error.default(ex.message || "Could not read Elasticsearch latest data.", ex.code || "PUBLISH_LATEST_READ", {
error: ex,
latestKeys: latestKeys,
publishedKeys: publishedKeys
});
}
if (!latestEsEntry) {
throw new _error.default(`Could not publish entry. Could not load latest ("L") record (ES table).`, "PUBLISH_ERROR", {
entry
});
}
/**
* We need the latest entry to check if it needs to be updated in Elasticsearch as well.
*/
const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
model,
ids: [entry.id]
});
if (!latestStorageEntry) {
throw new _error.default(`Could not publish entry. Could not load latest ("L") record.`, "PUBLISH_ERROR", {
entry
});
}
/**
* We need the currently published entry to check if we need to remove it.
*/
const [publishedStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [entry.id]
});
// 1. Update REV# and P records with new data.
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
put: [{
...storageEntry,
...revisionKeys,
TYPE: (0, _recordType.createRecordType)()
}, {
...storageEntry,
...publishedKeys,
TYPE: (0, _recordType.createPublishedRecordType)()
}]
});
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity
});
const {
index: esIndex
} = _configurations.configurations.es({
model
});
// 2. When it comes to the latest record, we need to perform a couple of different
// updates, based on whether the entry being published is the latest revision or not.
const publishedRevisionId = publishedStorageEntry?.id;
const publishingLatestRevision = latestStorageEntry?.id === entry.id;
if (publishingLatestRevision) {
// 2.1 If we're publishing the latest revision, we first need to update the L record.
entityBatch.put({
...storageEntry,
...latestKeys
});
// 2.2 Additionally, if we have a previously published entry, we need to mark it as unpublished.
// Note that we need to take re-publishing into account (same published revision being
// published again), in which case the below code does not apply. This is because the
// required updates were already applied above.
if (publishedStorageEntry) {
const isRepublishing = publishedStorageEntry.id === entry.id;
if (!isRepublishing) {
/**
* Update currently published entry (unpublish it)
*/
entityBatch.put({
...publishedStorageEntry,
status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
TYPE: (0, _recordType.createRecordType)(),
PK: (0, _keys.createPartitionKey)(publishedStorageEntry),
SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry)
});
}
}
} else {
// 2.3 If the published revision is not the latest one, the situation is a bit
// more complex. We first need to update the L and REV# records with the new
// values of *only entry-level* meta fields.
const updatedEntryLevelMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isEntryLevelEntryMetaField);
// 2.4 Update L record. Apart from updating the entry-level meta fields, we also need
// to change the status from "published" to "unpublished" (if the status is set to "published").
let latestRevisionStatus = latestStorageEntry.status;
if (latestRevisionStatus === _types.CONTENT_ENTRY_STATUS.PUBLISHED) {
latestRevisionStatus = _types.CONTENT_ENTRY_STATUS.UNPUBLISHED;
}
const latestStorageEntryFields = {
...latestStorageEntry,
...updatedEntryLevelMetaFields,
status: latestRevisionStatus
};
entityBatch.put({
...latestStorageEntryFields,
PK: (0, _keys.createPartitionKey)(latestStorageEntry),
SK: (0, _keys.createLatestSortKey)(),
TYPE: (0, _recordType.createLatestRecordType)()
});
// 2.5 Update REV# record.
entityBatch.put({
...latestStorageEntryFields,
PK: (0, _keys.createPartitionKey)(latestStorageEntry),
SK: (0, _keys.createRevisionSortKey)(latestStorageEntry),
TYPE: (0, _recordType.createRecordType)()
});
// 2.6 Additionally, if we have a previously published entry, we need to mark it as unpublished.
// Note that we need to take re-publishing into account (same published revision being
// published again), in which case the below code does not apply. This is because the
// required updates were already applied above.
if (publishedStorageEntry) {
const isRepublishing = publishedStorageEntry.id === entry.id;
const publishedRevisionDifferentFromLatest = publishedRevisionId !== latestStorageEntry.id;
if (!isRepublishing && publishedRevisionDifferentFromLatest) {
entityBatch.put({
...publishedStorageEntry,
PK: (0, _keys.createPartitionKey)(publishedStorageEntry),
SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry),
TYPE: (0, _recordType.createRecordType)(),
status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED
});
}
}
}
// 3. Update records in ES -> DDB table.
/**
* Update the published revision entry in ES.
*/
const esPublishedData = await transformer.getElasticsearchPublishedEntryData();
elasticsearchEntityBatch.put({
...publishedKeys,
index: esIndex,
data: esPublishedData
});
/**
* We need to decompress the data coming from the DynamoDB Elasticsearch table.
*
* There is no need to transform it for storage because it was fetched directly
* from the Elasticsearch table, where it is already stored in transformed form.
*/
const latestEsEntryDataDecompressed = await (0, _apiElasticsearch.decompress)(plugins, latestEsEntry.data);
if (publishingLatestRevision) {
const updatedMetaFields = (0, _constants.pickEntryMetaFields)(entry);
const latestTransformer = (0, _transformations.createTransformer)({
plugins,
model,
transformedToIndex: {
...latestEsEntryDataDecompressed,
status: _types.CONTENT_ENTRY_STATUS.PUBLISHED,
locked: true,
...updatedMetaFields
}
});
elasticsearchEntityBatch.put({
index: esIndex,
PK: (0, _keys.createPartitionKey)(latestEsEntryDataDecompressed),
SK: (0, _keys.createLatestSortKey)(),
data: await latestTransformer.getElasticsearchLatestEntryData()
});
} else {
const updatedEntryLevelMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isEntryLevelEntryMetaField);
/**
* Update the Elasticsearch table to propagate the changes to Elasticsearch.
*/
const latestEsEntry = await (0, _dbDynamodb.getClean)({
entity: esEntity,
keys: latestKeys
});
if (latestEsEntry) {
const latestEsEntryDataDecompressed = await (0, _apiElasticsearch.decompress)(plugins, latestEsEntry.data);
let latestRevisionStatus = latestEsEntryDataDecompressed.status;
if (latestRevisionStatus === _types.CONTENT_ENTRY_STATUS.PUBLISHED) {
latestRevisionStatus = _types.CONTENT_ENTRY_STATUS.UNPUBLISHED;
}
const updatedLatestEntry = await (0, _apiElasticsearch.compress)(plugins, {
...latestEsEntryDataDecompressed,
...updatedEntryLevelMetaFields,
status: latestRevisionStatus
});
elasticsearchEntityBatch.put({
...latestKeys,
index: esIndex,
data: updatedLatestEntry
});
}
}
/**
* Finally, execute regular table batch.
*/
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not store publish entry records in DynamoDB table.", ex.code || "PUBLISH_ERROR", {
error: ex,
entry,
latestStorageEntry,
publishedStorageEntry
});
}
/**
* And Elasticsearch table batch.
*/
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not store publish entry records in DynamoDB Elasticsearch table.", ex.code || "PUBLISH_ES_ERROR", {
error: ex,
entry,
latestStorageEntry,
publishedStorageEntry
});
}
return initialStorageEntry;
};
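/**
* Unpublish a revision: deletes the published records from both tables, rewrites the
* revision record, and, when the revision is also the latest one, refreshes the latest
* records as well.
*/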
const unpublish = async (initialModel, params) => {
const {
entry: initialEntry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const transformer = (0, _transformations.createTransformer)({
plugins,
model,
entry: initialEntry,
storageEntry: initialStorageEntry
});
const {
entry,
storageEntry
} = transformer.transformEntryKeys();
/**
* We need the latest entry to check if it needs to be updated.
*/
const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
model,
ids: [entry.id]
});
const partitionKey = (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
});
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
put: [{
...storageEntry,
PK: partitionKey,
SK: (0, _keys.createRevisionSortKey)(entry),
TYPE: (0, _recordType.createRecordType)()
}],
delete: [{
PK: partitionKey,
SK: (0, _keys.createPublishedSortKey)()
}]
});
const elasticsearchEntityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity: esEntity,
delete: [{
PK: partitionKey,
SK: (0, _keys.createPublishedSortKey)()
}]
});
/**
* If we are unpublishing the latest revision, let's also update the latest revision entry's status in both DynamoDB tables.
*/
if (latestStorageEntry?.id === entry.id) {
const {
index
} = _configurations.configurations.es({
model
});
entityBatch.put({
...storageEntry,
PK: partitionKey,
SK: (0, _keys.createLatestSortKey)(),
TYPE: (0, _recordType.createLatestRecordType)()
});
const esLatestData = await transformer.getElasticsearchLatestEntryData();
elasticsearchEntityBatch.put({
PK: partitionKey,
SK: (0, _keys.createLatestSortKey)(),
index,
data: esLatestData
});
}
/**
* Finally, execute regular table batch.
*/
try {
await entityBatch.execute();
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not store unpublished entry records in DynamoDB table.", ex.code || "UNPUBLISH_ERROR", {
entry,
storageEntry
});
}
/**
* And Elasticsearch table batch.
*/
try {
await elasticsearchEntityBatch.execute();
} catch (ex) {
throw new _error.default(ex.message || "Could not store unpublished entry records in DynamoDB Elasticsearch table.", ex.code || "UNPUBLISH_ERROR", {
entry,
storageEntry
});
}
return initialStorageEntry;
};
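/**
* Fetch the latest revision of an entry by its entry ID, converting stored field keys
* back into their original form.
*/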
const getLatestRevisionByEntryId = async (initialModel, params) => {
const model = getStorageOperationsModel(initialModel);
const [entry] = await dataLoaders.getLatestRevisionByEntryId({
model,
ids: [params.id]
});
if (!entry) {
return null;
}
return (0, _convertEntryKeys.convertEntryKeysFromStorage)({
model,
entry
});
};
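/**
* Fetch the published revision of an entry by its entry ID, converting stored field
* keys back into their original form.
*/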
const getPublishedRevisionByEntryId = async (initialModel, params) => {
const model = getStorageOperationsModel(initialModel);
const [entry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [params.id]
});
if (!entry) {
return null;
}
return (0, _convertEntryKeys.convertEntryKeysFromStorage)({
model,
entry
});
};