@webiny/api-headless-cms-ddb
Version: (not specified)
DynamoDB storage operations plugin for the Webiny Headless CMS API.
1,306 lines (1,276 loc) • 40.2 kB
JavaScript
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault").default;
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.createEntriesStorageOperations = void 0;
var _error = _interopRequireDefault(require("@webiny/error"));
var _dataLoaders = require("./dataLoaders");
var _types = require("@webiny/api-headless-cms/types");
var _keys = require("./keys");
var _dbDynamodb = require("@webiny/db-dynamodb");
var _cursor = require("@webiny/utils/cursor");
var _zeroPad = require("@webiny/utils/zeroPad");
var _apiHeadlessCms = require("@webiny/api-headless-cms");
var _createFields = require("./filtering/createFields");
var _filtering = require("./filtering");
var _constants = require("@webiny/api-headless-cms/constants");
// Base DynamoDB record TYPE for an entry revision ("REV#") record.
const ENTRY_TYPE = "cms.entry";
/**
 * TYPE attribute written on revision records.
 */
const createType = () => ENTRY_TYPE;
/**
 * TYPE attribute written on the "latest" pointer record.
 */
const createLatestType = () => `${createType()}.l`;
/**
 * TYPE attribute written on the "published" pointer record.
 */
const createPublishedType = () => `${createType()}.p`;
/**
 * Map an entry's values from regular field keys into the model's storage
 * (shortened) key format before the record is written to DynamoDB.
 */
const convertToStorageEntry = ({ model, storageEntry }) => {
  const convertedValues = model.convertValueKeyToStorage({
    fields: model.fields,
    values: storageEntry.values
  });
  return {
    ...storageEntry,
    values: convertedValues
  };
};
/**
 * Inverse of convertToStorageEntry: map an entry's values from the storage
 * key format back into regular field keys after reading from DynamoDB.
 */
const convertFromStorageEntry = ({ model, storageEntry }) => {
  const restoredValues = model.convertValueKeyFromStorage({
    fields: model.fields,
    values: storageEntry.values
  });
  return {
    ...storageEntry,
    values: restoredValues
  };
};
// Hard cap used when a caller requests "everything" (limit <= 0 or over the cap).
const MAX_LIST_LIMIT = 1000000;
/**
 * Factory for the DynamoDB implementation of the Headless CMS entries storage
 * operations.
 * @param params.entity - dynamodb-toolbox Entity used for all reads/writes.
 * @param params.plugins - plugins container used to resolve model/storage plugins.
 */
const createEntriesStorageOperations = params => {
const {
entity,
plugins
} = params;
// Lazily resolved and cached StorageOperationsCmsModelPlugin instance.
let storageOperationsCmsModelPlugin;
const getStorageOperationsCmsModelPlugin = () => {
if (storageOperationsCmsModelPlugin) {
return storageOperationsCmsModelPlugin;
}
storageOperationsCmsModelPlugin = plugins.oneByType(_apiHeadlessCms.StorageOperationsCmsModelPlugin.type);
return storageOperationsCmsModelPlugin;
};
// Resolve the storage-operations variant of a CMS model (field key mappings etc.).
const getStorageOperationsModel = model => {
const plugin = getStorageOperationsCmsModelPlugin();
return plugin.getModel(model);
};
// DataLoader-backed caches for revision / latest / published lookups.
const dataLoaders = new _dataLoaders.DataLoadersHandler({
entity
});
/**
 * Build a `(field, value) => transformedValue` callable that runs registered
 * StorageTransformPlugin `fromStorage` transforms for the given model.
 * Plugins are collected once up front to avoid repeated lookups while
 * transforming many entries.
 */
const createStorageTransformCallable = model => {
  const transformers = {};
  for (const plugin of plugins.byType(_apiHeadlessCms.StorageTransformPlugin.type)) {
    // Later registrations win for the same field type, matching plugin override rules.
    transformers[plugin.fieldType] = plugin;
  }
  const getStoragePlugin = fieldType => {
    return transformers[fieldType] || transformers["*"];
  };
  return (field, value) => {
    const transformer = transformers[field.type];
    if (!transformer) {
      return value;
    }
    return transformer.fromStorage({
      model,
      field,
      value,
      getStoragePlugin,
      plugins
    });
  };
};
/**
 * Create a brand-new entry (its first revision).
 * Writes the REV# record and the latest ("L") record in a single batch; when
 * the entry is created directly in the "published" status, the published ("P")
 * record is written as well. Returns the passed-in storage entry unchanged.
 * @throws WebinyError (CREATE_ENTRY_ERROR) when the batch write fails.
 */
const create = async (initialModel, params) => {
    const {
        entry,
        storageEntry: initialStorageEntry
    } = params;
    const model = getStorageOperationsModel(initialModel);
    const partitionKey = (0, _keys.createPartitionKey)({
        id: entry.id,
        locale: model.locale,
        tenant: model.tenant
    });
    const isPublished = entry.status === "published";
    // Publishing always locks the entry; otherwise keep the caller's flag.
    const locked = isPublished ? true : entry.locked;
    const storageEntry = convertToStorageEntry({
        model,
        storageEntry: initialStorageEntry
    });
    /**
     * We need to:
     * - create new main entry item
     * - create new or update the latest entry item
     */
    const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
        entity,
        put: [{
            ...storageEntry,
            locked,
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(entry),
            TYPE: createType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
            GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
        }, {
            ...storageEntry,
            locked,
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
        }]
    });
    /**
     * We also need to create the published record, but only when the entry is
     * being created directly in the "published" status.
     */
    if (isPublished) {
        entityBatch.put({
            ...storageEntry,
            locked,
            PK: partitionKey,
            SK: (0, _keys.createPublishedSortKey)(),
            // Fixed: the published record must carry the published TYPE
            // ("cms.entry.p"). It previously used createLatestType(), which is
            // inconsistent with createRevisionFrom/update/publish and breaks
            // TYPE-based filtering of published records.
            TYPE: createPublishedType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
            GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
        });
    }
    try {
        await entityBatch.execute();
        // Loader caches may now be stale for this model — drop them.
        dataLoaders.clearAll({
            model
        });
    } catch (ex) {
        throw new _error.default(ex.message || "Could not insert data into the DynamoDB.", ex.code || "CREATE_ENTRY_ERROR", {
            error: ex,
            entry
        });
    }
    return initialStorageEntry;
};
/**
 * Create a new revision of an existing entry; the new revision becomes the
 * latest ("L") record. When it is created in the "published" status, it also
 * becomes the published ("P") record and the previously published revision
 * (if any) is switched to the "unpublished" status.
 * Returns the passed-in storage entry unchanged.
 * @throws WebinyError (CREATE_REVISION_ERROR) when the batch write fails.
 */
const createRevisionFrom = async (initialModel, params) => {
const {
entry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const partitionKey = (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
});
// Convert entry values into their storage (shortened field key) form.
const storageEntry = convertToStorageEntry({
storageEntry: initialStorageEntry,
model
});
/**
* We need to:
* - create the main entry item
* - update the latest entry item to the current one
* - if the entry's status was set to "published":
* - update the published entry item to the current one
* - unpublish previously published revision (if any)
*/
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
put: [{
...storageEntry,
PK: partitionKey,
SK: (0, _keys.createRevisionSortKey)(storageEntry),
TYPE: createType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
}, {
...storageEntry,
PK: partitionKey,
SK: (0, _keys.createLatestSortKey)(),
TYPE: createLatestType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
}]
});
const isPublished = entry.status === "published";
if (isPublished) {
// Replace the published ("P") pointer record with the new revision.
entityBatch.put({
...storageEntry,
PK: partitionKey,
SK: (0, _keys.createPublishedSortKey)(),
TYPE: createPublishedType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
});
// Unpublish previously published revision (if any).
const [publishedRevisionStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [entry.id]
});
if (publishedRevisionStorageEntry) {
// Rewrite the old published revision's REV# record with the "unpublished" status.
entityBatch.put({
...publishedRevisionStorageEntry,
PK: partitionKey,
SK: (0, _keys.createRevisionSortKey)(publishedRevisionStorageEntry),
TYPE: createType(),
status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
GSI1_SK: (0, _keys.createGSISortKey)(publishedRevisionStorageEntry)
});
}
}
try {
await entityBatch.execute();
// Loader caches may now hold stale latest/published records — drop them.
dataLoaders.clearAll({
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not create revision from given entry.", ex.code || "CREATE_REVISION_ERROR", {
error: ex,
entry,
storageEntry
});
}
/**
* There are no modifications on the entry created so just return the data.
*/
return initialStorageEntry;
};
/**
 * Update an existing revision of an entry.
 * Rewrites the revision's REV# record; refreshes the published ("P") record
 * when the revision is published, and the latest ("L") record when the
 * revision being updated is the latest one. When it is NOT the latest one,
 * the latest revision's entry-level meta fields are synchronized instead.
 * Returns the passed-in storage entry unchanged.
 * @throws WebinyError (UPDATE_ERROR) when the batch write fails.
 */
const update = async (initialModel, params) => {
const {
entry,
storageEntry: initialStorageEntry
} = params;
const model = getStorageOperationsModel(initialModel);
const partitionKey = (0, _keys.createPartitionKey)({
id: entry.id,
locale: model.locale,
tenant: model.tenant
});
const isPublished = entry.status === "published";
// Publishing always locks the entry; otherwise keep the caller's flag.
const locked = isPublished ? true : entry.locked;
const storageEntry = convertToStorageEntry({
model,
storageEntry: initialStorageEntry
});
/**
* We need to:
* - update the current entry
* - update the latest entry if the current entry is the latest one
*/
const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
entity,
put: [{
...storageEntry,
locked,
PK: partitionKey,
SK: (0, _keys.createRevisionSortKey)(storageEntry),
TYPE: createType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
}]
});
if (isPublished) {
// Keep the published ("P") pointer record in sync with the updated revision.
entityBatch.put({
...storageEntry,
locked,
PK: partitionKey,
SK: (0, _keys.createPublishedSortKey)(),
TYPE: createPublishedType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
});
}
/**
* We need the latest entry to update it as well if necessary.
*/
const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
if (latestStorageEntry) {
const updatingLatestRevision = latestStorageEntry.id === entry.id;
if (updatingLatestRevision) {
entityBatch.put({
...storageEntry,
locked,
PK: partitionKey,
SK: (0, _keys.createLatestSortKey)(),
TYPE: createLatestType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
// NOTE(review): this uses `entry` while sibling records use `storageEntry`
// for the GSI1 sort key — presumably equivalent for the fields the key
// is built from; confirm against createGSISortKey.
GSI1_SK: (0, _keys.createGSISortKey)(entry)
});
} else {
/**
* If not updating latest revision, we still want to update the latest revision's
* entry-level meta fields to match the current revision's entry-level meta fields.
*/
const updatedEntryLevelMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isEntryLevelEntryMetaField);
/**
* First we update the regular DynamoDB table. Two updates are needed:
* - one for the actual revision record
* - one for the latest record
*/
entityBatch.put({
...latestStorageEntry,
...updatedEntryLevelMetaFields,
PK: partitionKey,
SK: (0, _keys.createRevisionSortKey)(latestStorageEntry),
TYPE: createType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
});
entityBatch.put({
...latestStorageEntry,
...updatedEntryLevelMetaFields,
PK: partitionKey,
SK: (0, _keys.createLatestSortKey)(),
TYPE: createLatestType(),
GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
});
}
}
try {
await entityBatch.execute();
// Loader caches may now be stale for this model — drop them.
dataLoaders.clearAll({
model
});
return initialStorageEntry;
} catch (ex) {
throw new _error.default(ex.message || "Could not update entry.", ex.code || "UPDATE_ERROR", {
error: ex,
entry,
latestStorageEntry
});
}
};
/**
 * Move an entry (every revision plus the latest/published pointer records)
 * into another folder by rewriting each record with the new folderId.
 * @throws WebinyError when the batch write fails.
 */
const move = async (initialModel, id, folderId) => {
  const model = getStorageOperationsModel(initialModel);
  /**
   * Load every record stored under the entry's partition key: all revisions
   * and the published / latest pointer records.
   */
  const records = await (0, _dbDynamodb.queryAll)({
    entity,
    partitionKey: (0, _keys.createPartitionKey)({
      id,
      locale: model.locale,
      tenant: model.tenant
    }),
    options: {
      gte: " "
    }
  });
  /**
   * Re-write each record, replacing only the location's folderId.
   */
  const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
    entity,
    put: records.map(record => ({
      ...record,
      location: {
        ...record.location,
        folderId
      }
    }))
  });
  try {
    await entityBatch.execute();
  } catch (ex) {
    throw _error.default.from(ex, {
      message: "Could not move records to a new folder.",
      data: {
        id,
        folderId
      }
    });
  }
};
/**
 * Soft-delete an entry: mark every record under the entry's partition key as
 * deleted (trash bin), copying the "deleted" meta fields, deletion flag,
 * bin location and original folder from the provided storage entry.
 * Does nothing when the entry has no records.
 * @throws WebinyError (LOAD_ALL_RECORDS_ERROR / MOVE_ENTRY_TO_BIN_ERROR).
 */
const moveToBin = async (initialModel, params) => {
  const {
    entry,
    storageEntry: initialStorageEntry
  } = params;
  const model = getStorageOperationsModel(initialModel);
  /**
   * First we need to load all the revisions and published / latest entries.
   */
  const queryAllParams = {
    entity,
    partitionKey: (0, _keys.createPartitionKey)({
      id: entry.id,
      locale: model.locale,
      tenant: model.tenant
    }),
    options: {
      gte: " "
    }
  };
  let existingRecords = [];
  try {
    existingRecords = await (0, _dbDynamodb.queryAll)(queryAllParams);
  } catch (ex) {
    throw new _error.default(ex.message || "Could not load all records.", ex.code || "LOAD_ALL_RECORDS_ERROR", {
      error: ex,
      id: entry.id
    });
  }
  if (existingRecords.length === 0) {
    return;
  }
  const storageEntry = convertToStorageEntry({
    model,
    storageEntry: initialStorageEntry
  });
  // Pick the `deleted` meta fields from the storage entry.
  const deletedMetaFields = (0, _constants.pickEntryMetaFields)(storageEntry, _constants.isDeletedEntryMetaField);
  // Every record is re-written with the deletion markers applied.
  const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
    entity,
    put: existingRecords.map(record => ({
      ...record,
      ...deletedMetaFields,
      wbyDeleted: storageEntry.wbyDeleted,
      location: storageEntry.location,
      binOriginalFolderId: storageEntry.binOriginalFolderId
    }))
  });
  try {
    await entityBatch.execute();
  } catch (ex) {
    throw new _error.default(ex.message || "Could not move the entry to the bin.", ex.code || "MOVE_ENTRY_TO_BIN_ERROR", {
      error: ex,
      entry,
      storageEntry
    });
  }
};
/**
 * Hard-delete an entry: remove every record (all revisions plus latest and
 * published pointer records) under the entry's partition key, then clear the
 * data loader caches.
 * @throws WebinyError (LOAD_ALL_RECORDS_ERROR / DELETE_ENTRY_ERROR).
 */
const deleteEntry = async (initialModel, params) => {
  const {
    entry
  } = params;
  const id = entry.id || entry.entryId;
  const model = getStorageOperationsModel(initialModel);
  const queryAllParams = {
    entity,
    partitionKey: (0, _keys.createPartitionKey)({
      id,
      locale: model.locale,
      tenant: model.tenant
    }),
    options: {
      gte: " "
    }
  };
  let existingRecords = [];
  try {
    existingRecords = await (0, _dbDynamodb.queryAll)(queryAllParams);
  } catch (ex) {
    throw new _error.default(ex.message || "Could not load all records.", ex.code || "LOAD_ALL_RECORDS_ERROR", {
      error: ex,
      id
    });
  }
  // Only the key attributes are needed to delete each record.
  const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
    entity,
    delete: existingRecords.map(({ PK, SK }) => ({
      PK,
      SK
    }))
  });
  try {
    await entityBatch.execute();
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not delete the entry.", ex.code || "DELETE_ENTRY_ERROR", {
      error: ex,
      partitionKey: queryAllParams.partitionKey,
      id
    });
  }
};
/**
 * Restore a soft-deleted entry from the trash bin: rewrite every record under
 * the entry's partition key with the "restored" meta fields and the cleared
 * deletion markers from the provided storage entry.
 * Returns the passed-in storage entry unchanged (also when nothing exists).
 * @throws WebinyError (LOAD_ALL_RECORDS_ERROR / RESTORE_ENTRY_ERROR).
 */
const restoreFromBin = async (initialModel, params) => {
  const {
    entry,
    storageEntry: initialStorageEntry
  } = params;
  const model = getStorageOperationsModel(initialModel);
  /**
   * First we need to load all the revisions and published / latest entries.
   */
  const queryAllParams = {
    entity,
    partitionKey: (0, _keys.createPartitionKey)({
      id: entry.id,
      locale: model.locale,
      tenant: model.tenant
    }),
    options: {
      gte: " "
    }
  };
  let existingRecords = [];
  try {
    existingRecords = await (0, _dbDynamodb.queryAll)(queryAllParams);
  } catch (ex) {
    throw new _error.default(ex.message || "Could not load all records.", ex.code || "LOAD_ALL_RECORDS_ERROR", {
      error: ex,
      id: entry.id
    });
  }
  if (existingRecords.length === 0) {
    return initialStorageEntry;
  }
  const storageEntry = convertToStorageEntry({
    model,
    storageEntry: initialStorageEntry
  });
  // Pick the `restored` meta fields from the storage entry.
  const restoredMetaFields = (0, _constants.pickEntryMetaFields)(storageEntry, _constants.isRestoredEntryMetaField);
  const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
    entity,
    put: existingRecords.map(record => ({
      ...record,
      ...restoredMetaFields,
      wbyDeleted: storageEntry.wbyDeleted,
      location: storageEntry.location,
      binOriginalFolderId: storageEntry.binOriginalFolderId
    }))
  });
  try {
    await entityBatch.execute();
    dataLoaders.clearAll({
      model
    });
    return initialStorageEntry;
  } catch (ex) {
    throw new _error.default(ex.message || "Could not restore the entry from the bin.", ex.code || "RESTORE_ENTRY_ERROR", {
      error: ex,
      entry,
      storageEntry
    });
  }
};
/**
 * Delete a single revision of an entry.
 * Removes the revision's REV# record; removes the published ("P") record too
 * when the deleted revision was the published one; and, when a previous
 * revision is supplied, re-points the latest ("L") record at it.
 * @throws WebinyError when the batch write fails.
 */
const deleteRevision = async (initialModel, params) => {
    const {
        entry,
        latestEntry,
        latestStorageEntry: initialLatestStorageEntry
    } = params;
    const model = getStorageOperationsModel(initialModel);
    const partitionKey = (0, _keys.createPartitionKey)({
        id: entry.id,
        locale: model.locale,
        tenant: model.tenant
    });
    // Always delete the REV# record of the revision being removed.
    const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
        entity,
        delete: [{
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(entry)
        }]
    });
    const publishedStorageEntry = await getPublishedRevisionByEntryId(model, entry);
    /**
     * If revision we are deleting is the published one as well, we need to delete those records as well.
     */
    if (publishedStorageEntry && entry.id === publishedStorageEntry.id) {
        entityBatch.delete({
            PK: partitionKey,
            SK: (0, _keys.createPublishedSortKey)()
        });
    }
    if (initialLatestStorageEntry) {
        const latestStorageEntry = convertToStorageEntry({
            storageEntry: initialLatestStorageEntry,
            model
        });
        // Re-point the latest ("L") record at the supplied (previous) revision.
        entityBatch.put({
            ...latestStorageEntry,
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
        });
        // Do an update on the latest revision. We need to update the latest revision's
        // entry-level meta fields to match the previous revision's entry-level meta fields.
        entityBatch.put({
            ...latestStorageEntry,
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(initialLatestStorageEntry),
            TYPE: createType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
            GSI1_SK: (0, _keys.createGSISortKey)(initialLatestStorageEntry)
        });
    }
    try {
        // Fixed: the batch execution was not awaited, so write failures escaped
        // this try/catch and loader caches were cleared before the writes finished.
        await entityBatch.execute();
        dataLoaders.clearAll({
            model
        });
    } catch (ex) {
        // Consistency: fall back to a message/code like every sibling operation does.
        throw new _error.default(ex.message || "Could not delete entry revision.", ex.code || "DELETE_REVISION_ERROR", {
            error: ex,
            entry,
            latestEntry
        });
    }
};
/**
 * Hard-delete multiple entries in one batch.
 * For each entry id, deletes the latest ("L") and published ("P") pointer
 * records plus every known REV# revision record.
 * @param params.entries - Array of entry ids to delete.
 */
const deleteMultipleEntries = async (initialModel, params) => {
    const {
        entries
    } = params;
    const model = getStorageOperationsModel(initialModel);
    /**
     * First we need all the revisions of the entries we want to delete.
     */
    const revisions = await dataLoaders.getAllEntryRevisions({
        model,
        ids: entries
    });
    /**
     * Then we need to construct the queries for all the revisions and entries.
     */
    const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
        entity
    });
    // Latest ("L") and published ("P") pointer records of each entry.
    for (const id of entries) {
        const partitionKey = (0, _keys.createPartitionKey)({
            id,
            locale: model.locale,
            tenant: model.tenant
        });
        entityBatch.delete({
            PK: partitionKey,
            SK: "L"
        });
        entityBatch.delete({
            PK: partitionKey,
            SK: "P"
        });
    }
    /**
     * Exact revisions of all the entries
     */
    for (const revision of revisions) {
        entityBatch.delete({
            PK: (0, _keys.createPartitionKey)({
                id: revision.id,
                locale: model.locale,
                tenant: model.tenant
            }),
            SK: (0, _keys.createRevisionSortKey)({
                version: revision.version
            })
        });
    }
    await entityBatch.execute();
    // Consistency fix: every other mutating operation clears the loader caches;
    // without this, reads later in the same request could return deleted entries.
    dataLoaders.clearAll({
        model
    });
};
/**
 * Shared helpers for the read operations below: convert data-loader results
 * (storage records) back into regular entries.
 */
const convertManyFromStorage = (model, items) => {
  return items.map(storageEntry => convertFromStorageEntry({
    storageEntry,
    model
  }));
};
const convertFirstFromStorage = (model, items) => {
  const [item] = items;
  if (!item) {
    return null;
  }
  return convertFromStorageEntry({
    storageEntry: item,
    model
  });
};
/**
 * Load the latest revision of a single entry, or null when none exists.
 */
const getLatestRevisionByEntryId = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const items = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: [params.id]
  });
  return convertFirstFromStorage(model, items);
};
/**
 * Load the published revision of a single entry, or null when none exists.
 */
const getPublishedRevisionByEntryId = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const items = await dataLoaders.getPublishedRevisionByEntryId({
    model,
    ids: [params.id]
  });
  return convertFirstFromStorage(model, items);
};
/**
 * Load a single revision by its exact revision id, or null when none exists.
 */
const getRevisionById = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const items = await dataLoaders.getRevisionById({
    model,
    ids: [params.id]
  });
  return convertFirstFromStorage(model, items);
};
/**
 * Load every revision of a single entry.
 */
const getRevisions = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const items = await dataLoaders.getAllEntryRevisions({
    model,
    ids: [params.id]
  });
  return convertManyFromStorage(model, items);
};
/**
 * Load multiple revisions by their exact revision ids.
 */
const getByIds = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const items = await dataLoaders.getRevisionById({
    model,
    ids: params.ids
  });
  return convertManyFromStorage(model, items);
};
/**
 * Load the latest revisions of multiple entries.
 */
const getLatestByIds = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const items = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: params.ids
  });
  return convertManyFromStorage(model, items);
};
/**
 * Load the published revisions of multiple entries.
 */
const getPublishedByIds = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const items = await dataLoaders.getPublishedRevisionByEntryId({
    model,
    ids: params.ids
  });
  return convertManyFromStorage(model, items);
};
/**
 * Load the revision immediately preceding the given version of an entry.
 * Queries the entry's partition in reverse for the first REV# record with a
 * lower version; returns null when there is none.
 * @throws WebinyError (GET_PREVIOUS_VERSION_ERROR) when the query fails.
 */
const getPreviousRevision = async (initialModel, params) => {
const model = getStorageOperationsModel(initialModel);
const {
entryId,
version
} = params;
const queryParams = {
entity,
partitionKey: (0, _keys.createPartitionKey)({
tenant: model.tenant,
locale: model.locale,
id: entryId
}),
options: {
lt: `REV#${(0, _zeroPad.zeroPad)(version)}`,
/**
* We need to have extra checks because DynamoDB will return published or latest record if there is no REV# record.
*/
filters: [{
attr: "TYPE",
eq: createType()
}, {
attr: "version",
lt: version
}],
reverse: true
}
};
try {
const result = await (0, _dbDynamodb.queryOne)(queryParams);
// cleanupItem strips entity-internal attributes; falsy when no record matched.
const storageEntry = (0, _dbDynamodb.cleanupItem)(entity, result);
if (!storageEntry) {
return null;
}
return convertFromStorageEntry({
storageEntry,
model
});
} catch (ex) {
throw new _error.default(ex.message || "Could not get previous version of given entry.", ex.code || "GET_PREVIOUS_VERSION_ERROR", {
...params,
error: ex,
partitionKey: queryParams.partitionKey,
options: queryParams.options,
model
});
}
};
/**
 * List entries of a model.
 * NOTE: this implementation loads ALL latest ("L") or published ("P") records
 * of the model from GSI1 and then filters, sorts and paginates them in memory.
 * The "cursor" is simply an encoded numeric offset.
 * @returns {hasMoreItems, totalCount, cursor, items}
 * @throws WebinyError (QUERY_ENTRIES_ERROR) when the GSI query fails.
 */
const list = async (initialModel, params) => {
const model = getStorageOperationsModel(initialModel);
const {
limit: initialLimit = 10,
where: initialWhere,
after,
sort: sortBy,
fields,
search
} = params;
// A non-positive or over-cap limit means "list everything" (up to the hard cap).
const limit = initialLimit <= 0 || initialLimit >= MAX_LIST_LIMIT ? MAX_LIST_LIMIT : initialLimit;
// Published listings read "P" records; everything else reads latest ("L") records.
const type = initialWhere.published ? "P" : "L";
const queryAllParams = {
entity,
partitionKey: (0, _keys.createGSIPartitionKey)(model, type),
options: {
index: "GSI1",
gte: " "
}
};
let storageEntries = [];
try {
storageEntries = await (0, _dbDynamodb.queryAll)(queryAllParams);
} catch (ex) {
throw new _error.default(ex.message, "QUERY_ENTRIES_ERROR", {
error: ex,
partitionKey: queryAllParams.partitionKey,
options: queryAllParams.options
});
}
if (storageEntries.length === 0) {
return {
hasMoreItems: false,
totalCount: 0,
cursor: null,
items: []
};
}
// The published/latest flags already determined which record set was queried.
const where = {
...initialWhere
};
delete where["published"];
delete where["latest"];
/**
* We need an object containing field, transformers and paths.
* Just build it here and pass on into other methods that require it to avoid mapping multiple times.
*/
const modelFields = (0, _createFields.createFields)({
plugins,
fields: model.fields
});
const fromStorage = createStorageTransformCallable(model);
/**
* Let's transform records from storage ones to regular ones, so we do not need to do it later.
*
* This is always being done, but at least its in parallel.
*/
const records = await Promise.all(storageEntries.map(async storageEntry => {
const entry = convertFromStorageEntry({
storageEntry,
model
});
for (const field of model.fields) {
entry.values[field.fieldId] = await fromStorage(field, entry.values[field.fieldId]);
}
return entry;
}));
/**
* Filter the read items via the code.
* It will build the filters out of the where input and transform the values it is using.
*/
const filteredItems = (0, _filtering.filter)({
items: records,
where,
plugins,
fields: modelFields,
fullTextSearch: {
term: search,
fields: fields ? fields.getAllPaths() : []
}
});
const totalCount = filteredItems.length;
/**
* Sorting is also done via the code.
* It takes the sort input and sorts by it via the lodash sortBy method.
*/
const sortedItems = (0, _filtering.sort)({
model,
plugins,
items: filteredItems,
sort: sortBy,
fields: modelFields
});
// Decode the numeric offset cursor; any invalid/missing cursor starts at 0.
const start = parseInt((0, _cursor.decodeCursor)(after) || "0") || 0;
const hasMoreItems = totalCount > start + limit;
// NOTE(review): this condition can never be true for non-negative counts, so
// "end" is always start + limit. Array.prototype.slice clamps to the array
// length, so the result still matches the presumably intended past-the-end check.
const end = limit > totalCount + start + limit ? undefined : start + limit;
const slicedItems = sortedItems.slice(start, end);
/**
* Although we do not need a cursor here, we will use it as such to keep it standardized.
* Number is simply encoded.
*/
const cursor = (0, _cursor.encodeCursor)(`${start + limit}`);
return {
hasMoreItems,
totalCount,
cursor,
items: (0, _dbDynamodb.cleanupItems)(entity, slicedItems)
};
};
/**
 * Fetch a single entry matching the given listing parameters, or null.
 * Delegates to `list` with a limit of one.
 */
const get = async (initialModel, params) => {
  const model = getStorageOperationsModel(initialModel);
  const result = await list(model, {
    ...params,
    limit: 1
  });
  return result.items[0] || null;
};
/**
 * Publish a revision of an entry.
 * Rewrites the revision's REV# record and the published ("P") record. The
 * latest ("L") record is either replaced (when publishing the latest revision)
 * or updated with the new entry-level meta fields and flipped from
 * "published" to "unpublished" status. A previously published different
 * revision gets its REV# record rewritten with the "unpublished" status.
 * Returns the passed-in storage entry unchanged.
 * @throws WebinyError (PUBLISH_ERROR) when the latest record is missing or the batch fails.
 */
const publish = async (initialModel, params) => {
    const {
        entry,
        storageEntry: initialStorageEntry
    } = params;
    const model = getStorageOperationsModel(initialModel);
    const partitionKey = (0, _keys.createPartitionKey)({
        id: entry.id,
        locale: model.locale,
        tenant: model.tenant
    });
    /**
     * We need the latest and published entries to see if something needs to be updated alongside the publishing one.
     */
    const initialLatestStorageEntry = await getLatestRevisionByEntryId(model, entry);
    if (!initialLatestStorageEntry) {
        throw new _error.default(`Could not publish entry. Could not load latest ("L") record.`, "PUBLISH_ERROR", {
            entry
        });
    }
    const initialPublishedStorageEntry = await getPublishedRevisionByEntryId(model, entry);
    const storageEntry = convertToStorageEntry({
        model,
        storageEntry: initialStorageEntry
    });
    // 1. Update REV# and P records with new data.
    const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
        entity,
        put: [{
            ...storageEntry,
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(entry),
            TYPE: createType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        }, {
            ...storageEntry,
            PK: partitionKey,
            SK: (0, _keys.createPublishedSortKey)(),
            TYPE: createPublishedType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        }]
    });
    // 2. When it comes to the latest record, we need to perform a couple of different
    // updates, based on whether the entry being published is the latest revision or not.
    const publishedRevisionId = initialPublishedStorageEntry?.id;
    const publishingLatestRevision = entry.id === initialLatestStorageEntry.id;
    if (publishingLatestRevision) {
        // 2.1 If we're publishing the latest revision, we first need to update the L record.
        entityBatch.put({
            ...storageEntry,
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        });
        // 2.2 Additionally, if we have a previously published entry, we need to mark it as unpublished.
        if (publishedRevisionId && publishedRevisionId !== entry.id) {
            const publishedStorageEntry = convertToStorageEntry({
                storageEntry: initialPublishedStorageEntry,
                model
            });
            entityBatch.put({
                ...publishedStorageEntry,
                PK: partitionKey,
                SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry),
                TYPE: createType(),
                status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
                GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
                GSI1_SK: (0, _keys.createGSISortKey)(publishedStorageEntry)
            });
        }
    } else {
        // 2.3 If the published revision is not the latest one, the situation is a bit
        // more complex. We first need to update the L and REV# records with the new
        // values of *only entry-level* meta fields.
        const updatedEntryLevelMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isEntryLevelEntryMetaField);
        const latestStorageEntry = convertToStorageEntry({
            storageEntry: initialLatestStorageEntry,
            model
        });
        // 2.3.1 Update L record. Apart from updating the entry-level meta fields, we also need
        // to change the status from "published" to "unpublished" (if the status is set to "published").
        let latestRevisionStatus = latestStorageEntry.status;
        if (latestRevisionStatus === _types.CONTENT_ENTRY_STATUS.PUBLISHED) {
            latestRevisionStatus = _types.CONTENT_ENTRY_STATUS.UNPUBLISHED;
        }
        const latestStorageEntryFields = {
            ...latestStorageEntry,
            ...updatedEntryLevelMetaFields,
            status: latestRevisionStatus
        };
        entityBatch.put({
            ...latestStorageEntryFields,
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
        });
        // 2.3.2 Update REV# record.
        entityBatch.put({
            ...latestStorageEntryFields,
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(latestStorageEntry),
            TYPE: createType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
            GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
        });
        // 2.3.3 Finally, if we got a published entry, but it wasn't the latest one, we need to take
        // an extra step and mark it as unpublished.
        const publishedRevisionDifferentFromLatest = publishedRevisionId && publishedRevisionId !== latestStorageEntry.id;
        if (publishedRevisionDifferentFromLatest) {
            const publishedStorageEntry = convertToStorageEntry({
                storageEntry: initialPublishedStorageEntry,
                model
            });
            entityBatch.put({
                ...publishedStorageEntry,
                PK: partitionKey,
                SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry),
                TYPE: createType(),
                status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
                GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
                GSI1_SK: (0, _keys.createGSISortKey)(publishedStorageEntry)
            });
        }
    }
    try {
        await entityBatch.execute();
        dataLoaders.clearAll({
            model
        });
        return initialStorageEntry;
    } catch (ex) {
        throw new _error.default(ex.message || "Could not execute the publishing batch.", ex.code || "PUBLISH_ERROR", {
            // Consistency fix: include the underlying error, like the other operations do.
            error: ex,
            entry,
            latestStorageEntry: initialLatestStorageEntry,
            publishedStorageEntry: initialPublishedStorageEntry
        });
    }
};
/**
 * Unpublish a revision of an entry.
 * Deletes the published ("P") record and rewrites the revision's REV# record.
 * The latest ("L") record is replaced when unpublishing the latest revision;
 * otherwise the latest revision's entry-level meta fields are synchronized.
 * Returns the passed-in storage entry unchanged.
 * @throws WebinyError (UNPUBLISH_ERROR) when the batch write fails.
 */
const unpublish = async (initialModel, params) => {
    const {
        entry,
        storageEntry: initialStorageEntry
    } = params;
    const model = getStorageOperationsModel(initialModel);
    const partitionKey = (0, _keys.createPartitionKey)({
        id: entry.id,
        locale: model.locale,
        tenant: model.tenant
    });
    const storageEntry = convertToStorageEntry({
        storageEntry: initialStorageEntry,
        model
    });
    /**
     * We need to:
     * - delete currently published entry
     * - update current entry revision with new data
     * - update the latest entry status - if entry being unpublished is latest
     */
    const entityBatch = (0, _dbDynamodb.createEntityWriteBatch)({
        entity,
        delete: [{
            PK: partitionKey,
            SK: (0, _keys.createPublishedSortKey)()
        }],
        put: [{
            ...storageEntry,
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(entry),
            TYPE: createType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        }]
    });
    /**
     * We need the latest entry to see if something needs to be updated alongside the unpublishing one.
     */
    const initialLatestStorageEntry = await getLatestRevisionByEntryId(model, entry);
    if (initialLatestStorageEntry) {
        const unpublishingLatestRevision = entry.id === initialLatestStorageEntry.id;
        if (unpublishingLatestRevision) {
            entityBatch.put({
                ...storageEntry,
                PK: partitionKey,
                SK: (0, _keys.createLatestSortKey)(),
                TYPE: createLatestType(),
                GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
                GSI1_SK: (0, _keys.createGSISortKey)(entry)
            });
        } else {
            const latestStorageEntry = convertToStorageEntry({
                storageEntry: initialLatestStorageEntry,
                model
            });
            // If the unpublished revision is not the latest one, we still need to
            // update the latest record with the new values of entry-level meta fields.
            const updatedEntryLevelMetaFields = (0, _constants.pickEntryMetaFields)(entry, _constants.isEntryLevelEntryMetaField);
            // 1. Update actual revision record.
            entityBatch.put({
                ...latestStorageEntry,
                ...updatedEntryLevelMetaFields,
                PK: partitionKey,
                SK: (0, _keys.createRevisionSortKey)(latestStorageEntry),
                TYPE: createType(),
                GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
                GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
            });
            // 2. Update latest record.
            entityBatch.put({
                ...latestStorageEntry,
                ...updatedEntryLevelMetaFields,
                PK: partitionKey,
                SK: (0, _keys.createLatestSortKey)(),
                TYPE: createLatestType(),
                GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
                GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
            });
        }
    }
    try {
        await entityBatch.execute();
        dataLoaders.clearAll({
            model
        });
        return initialStorageEntry;
    } catch (ex) {
        throw new _error.default(ex.message || "Could not execute unpublish batch.", ex.code || "UNPUBLISH_ERROR", {
            // Consistency fix: include the underlying error, like the other operations do.
            error: ex,
            entry,
            storageEntry
        });
    }
};
/**
 * Collect unique values of a single field across all entries matching `where`,
 * together with occurrence counts. Results are ordered by count (descending)
 * and, for equal counts, by value (ascending — sorts are stable).
 * @throws WebinyError (FIELD_NOT_FOUND) when the field does not exist on the model.
 */
const getUniqueFieldValues = async (model, params) => {
  const {
    where,
    fieldId
  } = params;
  const field = model.fields.find(f => f.fieldId === fieldId);
  if (!field) {
    throw new _error.default(`Could not find field with given "fieldId" value.`, "FIELD_NOT_FOUND", {
      fieldId
    });
  }
  const {
    items
  } = await list(model, {
    where,
    limit: MAX_LIST_LIMIT
  });
  // Plain object on purpose: keys coerce to strings, so e.g. 2 and "2" fall
  // into the same bucket — matching the historical behavior.
  const counters = {};
  for (const item of items) {
    const rawValue = item.values[field.fieldId];
    // Falsy field values (undefined, null, 0, "", false) are skipped entirely.
    if (!rawValue) {
      continue;
    }
    const entryValues = Array.isArray(rawValue) ? rawValue : [rawValue];
    for (const value of entryValues) {
      counters[value] = {
        value,
        count: (counters[value]?.count || 0) + 1
      };
    }
  }
  const output = Object.values(counters);
  output.sort((a, b) => (a.value > b.value ? 1 : b.value > a.value ? -1 : 0));
  output.sort((a, b) => b.count - a.count);
  return output;
};
/**
 * The entries storage-operations object handed back to the Headless CMS.
 * `dataLoaders` is exposed so callers can clear or inspect the request caches.
 */
return {
create,
createRevisionFrom,
update,
move,
delete: deleteEntry,
moveToBin,
restoreFromBin,
deleteRevision,
deleteMultipleEntries,
getPreviousRevision,
getPublishedByIds,
getLatestByIds,
getByIds,
getRevisionById,
getPublishedRevisionByEntryId,
getLatestRevisionByEntryId,
get,
getRevisions,
publish,
list,
unpublish,
dataLoaders,
getUniqueFieldValues
};
};
exports.createEntriesStorageOperations = createEntriesStorageOperations;
//# sourceMappingURL=index.js.map