azurite
An open source Azure Storage API compatible server
JavaScript
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
const fs_1 = require("fs");
const lokijs_1 = tslib_1.__importDefault(require("lokijs"));
const NotImplementedError_1 = tslib_1.__importDefault(require("../errors/NotImplementedError"));
const StorageErrorFactory_1 = tslib_1.__importDefault(require("../errors/StorageErrorFactory"));
const constants_1 = require("../utils/constants");
const LokiTableStoreQueryGenerator_1 = tslib_1.__importDefault(require("./LokiTableStoreQueryGenerator"));
const utils_1 = require("../../common/utils/utils");
class LokiTableMetadataStore {
constructor(lokiDBPath, inMemory) {
this.lokiDBPath = lokiDBPath;
this.TABLES_COLLECTION = "$TABLES_COLLECTION$";
this.SERVICES_COLLECTION = "$SERVICES_COLLECTION$";
this.initialized = false;
this.closed = false;
// The rollback entity arrays hold the rows that we will reapply to the database
// if we need to roll back a transaction.
// We assume that there is no IO during the processing of a transaction and that
// execution remains in the same thread associated with the transaction.
// See: https://nodejs.org/en/docs/guides/event-loop-timers-and-nexttick/
this.transactionRollbackTheseEntities = []; // can maybe use Entity instead of any
this.transactionDeleteTheseEntities = []; // can maybe use Entity instead of any
this.db = new lokijs_1.default(lokiDBPath, inMemory ? {
persistenceMethod: "memory"
} : {
persistenceMethod: "fs",
autosave: true,
autosaveInterval: 5000
});
}
/**
* Initializes the persistence layer
*
* @return {*} {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async init() {
await this.loadDB();
this.createTablesCollection();
this.createServicePropsCollection();
await this.saveDBState();
this.finalizeInitializationState();
}
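// Usage sketch (hypothetical, not part of this module): the store is constructed with a
// DB path and an in-memory flag, then initialized before use and closed before clean().
//
//   const store = new LokiTableMetadataStore("./azurite-table-db.json", false);
//   await store.init();   // loads or creates the Loki DB and its registry collections
//   // ... serve requests ...
//   await store.close();  // flush and close; clean() only works on a closed store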
/**
* Close down the DB
*
* @return {*} {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async close() {
await new Promise((resolve, reject) => {
this.db.close((err) => {
if (err) {
reject(err);
}
else {
resolve();
}
});
});
this.closed = true;
}
/**
* Clean LokiTableMetadataStore.
*
* @returns {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async clean() {
if (this.isClosed()) {
await (0, utils_1.rimrafAsync)(this.lokiDBPath);
return;
}
throw new Error(`Cannot clean LokiTableMetadataStore, it's not closed.`);
}
isInitialized() {
return this.initialized;
}
isClosed() {
return this.closed;
}
/**
* Create a table in the persistence layer
*
* @param {Context} context
* @param {Table} tableModel
* @return {*} {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async createTable(context, tableModel) {
// Check for table entry in the table registry collection
const coll = this.db.getCollection(this.TABLES_COLLECTION);
// Azure Storage table names are case-insensitive, so the existence check below
// matches the table name case-insensitively (the stored name keeps its original casing).
this.checkIfTableExists(coll, tableModel, context);
coll.insert(tableModel);
this.createCollectionForTable(tableModel);
}
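// Example (illustrative only; the full Table model comes from Azurite's table models,
// reduced here to the fields this method actually reads):
//
//   await store.createTable(context, {
//     account: "devstoreaccount1",  // storage account name
//     table: "MyTable"              // table name, checked case-insensitively below
//   });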
/**
* Delete a table from the persistence layer
*
* @param {Context} context
* @param {string} table
* @param {string} account
* @return {*} {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async deleteTable(context, table, account) {
// remove table reference from collection registry
const coll = this.db.getCollection(this.TABLES_COLLECTION);
// Azure Storage Service is case-insensitive
const tableLower = table.toLocaleLowerCase();
const doc = coll.findOne({
account,
table: { $regex: [`^${tableLower}$`, "i"] }
});
this.checkIfResourceExists(doc, context);
coll.remove(doc);
this.removeTableCollection(account, doc);
}
/**
* Update the ACL of an existing table item in the persistence layer.
*
* @param {string} account
* @param {string} table
* @param {Context} context
* @param {TableACL} [tableACL]
* @returns {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async setTableACL(account, table, context, tableACL) {
const coll = this.db.getCollection(this.TABLES_COLLECTION);
// Azure Storage Service is case-insensitive
const tableLower = table.toLocaleLowerCase();
const persistedTable = coll.findOne({
account,
table: { $regex: [`^${tableLower}$`, "i"] }
});
if (!persistedTable) {
throw StorageErrorFactory_1.default.getTableNotFound(context);
}
persistedTable.tableAcl = tableACL;
coll.update(persistedTable);
}
/**
* Gets a table from the loki js persistence layer.
*
* @param {string} account
* @param {string} table
* @param {Context} context
* @return {*} {Promise<Table>}
* @memberof LokiTableMetadataStore
*/
async getTable(account, table, context) {
const coll = this.db.getCollection(this.TABLES_COLLECTION);
// Azure Storage Service is case-insensitive
const doc = coll.findOne({
account,
table: { $regex: [`^${table}$`, "i"] }
});
if (!doc) {
throw StorageErrorFactory_1.default.getTableNotFound(context);
}
return doc;
}
async queryTable(context, account, queryOptions, nextTable) {
const coll = this.db.getCollection(this.TABLES_COLLECTION);
const filter = { account };
if (nextTable) {
filter.table = { $gte: nextTable };
}
let queryWhere;
try {
queryWhere = LokiTableStoreQueryGenerator_1.default.generateQueryTableWhereFunction(queryOptions.filter);
}
catch (e) {
throw StorageErrorFactory_1.default.getQueryConditionInvalid(context);
}
const top = queryOptions.top || 1000;
const docList = coll
.chain()
.find(filter)
.where(queryWhere)
.simplesort("table")
.limit(top + 1)
.data();
let nextTableName;
if (docList.length > top) {
const tail = docList.pop();
nextTableName = tail.table;
}
if (!docList) {
throw StorageErrorFactory_1.default.getEntityNotFound(context);
}
return [docList, nextTableName];
}
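// Pagination sketch (hypothetical caller): top + 1 rows are fetched so that an extra row
// signals another page; that extra row's table name becomes the continuation value.
//
//   let next;
//   do {
//     const [tables, nextTable] = await store.queryTable(context, account, { top: 100 }, next);
//     // ... process tables ...
//     next = nextTable;
//   } while (next !== undefined);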
async insertTableEntity(context, table, account, entity, batchId) {
const tableEntityCollection = this.getEntityCollection(account, table, context);
const doc = tableEntityCollection.findOne({
PartitionKey: entity.PartitionKey,
RowKey: entity.RowKey
});
if (doc) {
throw StorageErrorFactory_1.default.getEntityAlreadyExist(context);
}
entity.properties.Timestamp = entity.lastModifiedTime;
entity.properties["Timestamp@odata.type"] = "Edm.DateTime";
if (batchId !== "" && batchId !== undefined) {
this.transactionDeleteTheseEntities.push(entity);
}
tableEntityCollection.insert(entity);
return entity;
}
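// Example entity shape (illustrative; eTag and lastModifiedTime are normally set by the
// Azurite table handlers before this call, and the eTag value below is an assumption):
//
//   await store.insertTableEntity(context, "MyTable", "devstoreaccount1", {
//     PartitionKey: "pk1",
//     RowKey: "rk1",
//     lastModifiedTime: new Date().toISOString(),
//     eTag: "W/\"datetime'2024-01-01T00%3A00%3A00.000Z'\"",
//     properties: { value: 42, "value@odata.type": "Edm.Int32" }
//   }, ""); // empty batchId => not part of a batch transaction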
async insertOrUpdateTableEntity(context, table, account, entity, ifMatch, batchId) {
if (ifMatch === undefined) {
// Upsert
const existingEntity = await this.queryTableEntitiesWithPartitionAndRowKey(context, table, account, entity.PartitionKey, entity.RowKey, batchId);
if (existingEntity) {
// Update
return this.updateTableEntity(context, table, account, entity, ifMatch, batchId);
}
else {
// Insert
return this.insertTableEntity(context, table, account, entity, batchId);
}
}
else {
// Update
return this.updateTableEntity(context, table, account, entity, ifMatch, batchId);
}
}
async insertOrMergeTableEntity(context, table, account, entity, ifMatch, batchId) {
if (ifMatch === undefined) {
// Upsert
const existingEntity = await this.queryTableEntitiesWithPartitionAndRowKey(context, table, account, entity.PartitionKey, entity.RowKey);
if (existingEntity) {
// Merge
return this.mergeTableEntity(context, table, account, entity, ifMatch, batchId);
}
else {
// Insert
return this.insertTableEntity(context, table, account, entity, batchId);
}
}
else {
// Merge
return this.mergeTableEntity(context, table, account, entity, ifMatch, batchId);
}
}
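// Upsert semantics sketch (illustrative): with no If-Match value the two methods above act
// as upserts; passing an etag (or "*") forces the conditional update / merge path, which
// throws EntityNotFound when the entity does not exist.
//
//   await store.insertOrMergeTableEntity(context, table, account, entity, undefined, ""); // upsert
//   await store.insertOrMergeTableEntity(context, table, account, entity, "*", "");       // merge or throw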
async deleteTableEntity(context, table, account, partitionKey, rowKey, etag, batchId) {
const tableEntityCollection = this.getEntityCollection(account, table, context);
if (partitionKey !== undefined && rowKey !== undefined) {
const doc = tableEntityCollection.findOne({
PartitionKey: partitionKey,
RowKey: rowKey
});
this.checkForMissingEntity(doc, context);
this.checkIfMatchPrecondition(etag, doc, context);
this.trackRollback(batchId, doc);
tableEntityCollection.remove(doc);
return;
}
throw StorageErrorFactory_1.default.getPropertiesNeedValue(context);
}
async queryTableEntities(context, account, table, queryOptions, nextPartitionKey, nextRowKey) {
const tableEntityCollection = this.getEntityCollection(account, table, context);
const queryWhere = LokiTableStoreQueryGenerator_1.default.generateQueryForPersistenceLayer(queryOptions, context);
const maxResults = this.getMaxResultsOption(queryOptions);
// Decode the nextPartitionKey and nextRowKey. This is necessary since non-ASCII characters can
// be in partition and row keys but should not be in headers.
const decodedNextPartitionKey = this.decodeContinuationHeader(nextPartitionKey);
const decodedNextRowKey = this.decodeContinuationHeader(nextRowKey);
// .find using a segment filter does not filter in the same way that the sorting function sorts.
// Using offset would cause more problems than it solves, as we would have to step through and
// sort all results here, so two additional predicates are added to cover the cases with
// multiple partitions and rows to paginate.
const result = tableEntityCollection
.chain()
.where(queryWhere)
.where((data) => {
if (decodedNextPartitionKey !== undefined) {
if (data.PartitionKey > decodedNextPartitionKey) {
return true;
}
}
if (decodedNextRowKey !== undefined) {
if (data.RowKey >= decodedNextRowKey &&
(data.PartitionKey === decodedNextPartitionKey ||
data.PartitionKey === undefined)) {
return true;
}
return false;
}
if (decodedNextPartitionKey !== undefined) {
if (data.PartitionKey < decodedNextPartitionKey) {
return false;
}
}
return true;
})
.sort((obj1, obj2) => {
if (obj1.PartitionKey > obj2.PartitionKey) {
return 1;
}
else if (obj1.PartitionKey === obj2.PartitionKey) {
if (obj1.RowKey > obj2.RowKey) {
return 1;
}
else if (obj1.RowKey === obj2.RowKey) {
return 0;
}
else {
return -1;
}
}
else {
return -1;
}
})
.limit(maxResults + 1)
.data();
const response = this.adjustQueryResultforTop(result, maxResults);
return [
result,
response.nextPartitionKeyResponse,
response.nextRowKeyResponse
];
}
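// Continuation sketch (illustrative): nextPartitionKey / nextRowKey travel in HTTP headers,
// so adjustQueryResultforTop() base64-encodes them and this method decodes them before
// comparing against PartitionKey / RowKey.
//
//   const [entities, nextPk, nextRk] =
//     await store.queryTableEntities(context, account, table, { top: 1000 }, prevPk, prevRk);
//   // pass nextPk / nextRk back on the next request until both are undefined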
async queryTableEntitiesWithPartitionAndRowKey(context, table, account, partitionKey, rowKey, batchId) {
const entityCollection = this.getEntityCollection(account, table, context);
const requestedDoc = entityCollection.findOne({
PartitionKey: partitionKey,
RowKey: rowKey
});
return requestedDoc;
}
async getTableAccessPolicy(context, table) {
// TODO
throw new NotImplementedError_1.default(context);
}
async setTableAccessPolicy(context, table) {
// TODO
throw new NotImplementedError_1.default(context);
}
/**
* Get service properties for specific storage account.
*
* @param {Context} context
* @param {string} account
* @returns {Promise<ServicePropertiesModel | undefined>}
* @memberof LokiTableMetadataStore
*/
async getServiceProperties(context, account) {
const coll = this.db.getCollection(this.SERVICES_COLLECTION);
if (coll) {
const doc = coll.by("accountName", account);
return doc ? doc : undefined;
}
return undefined;
}
/**
* Update table service properties.
* This will create the service properties if they do not exist in the persistence layer.
*
* TODO: An account's service properties should be created when the storage account is created
* or when the metadata store is initialized; this method should then only be responsible for
* updating the existing record, saving one I/O call to fetch account properties.
* Undefined properties are ignored during the update.
*
* @param {Context} context
* @param {ServicePropertiesModel} serviceProperties
* @returns {Promise<ServicePropertiesModel>}
* @memberof LokiTableMetadataStore
*/
async setServiceProperties(context, serviceProperties) {
const coll = this.db.getCollection(this.SERVICES_COLLECTION);
const doc = coll.by("accountName", serviceProperties.accountName);
if (doc) {
doc.cors =
serviceProperties.cors === undefined
? doc.cors
: serviceProperties.cors;
doc.hourMetrics =
serviceProperties.hourMetrics === undefined
? doc.hourMetrics
: serviceProperties.hourMetrics;
doc.logging =
serviceProperties.logging === undefined
? doc.logging
: serviceProperties.logging;
doc.minuteMetrics =
serviceProperties.minuteMetrics === undefined
? doc.minuteMetrics
: serviceProperties.minuteMetrics;
return coll.update(doc);
}
else {
return coll.insert(serviceProperties);
}
}
/**
* Validates state for the start of a batch.
* Instead of copying all entities / rows in the collection,
* we only back up the rows that we change.
* The batchId is kept in the interface so that logging scenarios can extend it.
*
* @param {string} batchId
* @return {*} {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async beginBatchTransaction(batchId) {
if (this.transactionRollbackTheseEntities.length > 0 ||
this.transactionDeleteTheseEntities.length > 0) {
throw new Error("Transaction Overlap!");
}
}
/**
* Ends a batch transaction and will allow for rollback if needed.
*
* @param {string} account
* @param {string} table
* @param {string} batchId
* @param {Context} context
* @param {boolean} succeeded
* @return {*} {Promise<void>}
* @memberof LokiTableMetadataStore
*/
async endBatchTransaction(account, table, batchId, context, succeeded) {
// rollback all changes in the case of failed batch transaction
if (!succeeded) {
const tableBatchCollection = this.db.getCollection(this.getTableCollectionName(account, table));
if (tableBatchCollection) {
this.rollbackEntityChanges(tableBatchCollection);
this.removeEntitiesAddedInBatch(tableBatchCollection);
}
}
// reset entity rollback trackers
this.transactionRollbackTheseEntities = [];
this.transactionDeleteTheseEntities = [];
}
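// Batch flow sketch (illustrative; the real callers are Azurite's table batch handlers):
//
//   await store.beginBatchTransaction(batchId);  // asserts no other batch is in flight
//   try {
//     // each insert/update/delete called with a non-empty batchId records rollback state
//     await store.endBatchTransaction(account, table, batchId, context, true);
//   } catch (err) {
//     await store.endBatchTransaction(account, table, batchId, context, false); // roll back
//     throw err;
//   }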
/**
* Sets variables that track state of initialized DB
*
* @private
* @memberof LokiTableMetadataStore
*/
finalizeInitializationState() {
this.initialized = true;
this.closed = false;
}
/**
* Loads the DB from disk
*
* @private
* @memberof LokiTableMetadataStore
*/
async loadDB() {
await new Promise((resolve, reject) => {
(0, fs_1.stat)(this.lokiDBPath, (statError, stats) => {
if (!statError) {
this.db.loadDatabase({}, (dbError) => {
if (dbError) {
reject(dbError);
}
else {
resolve();
}
});
}
else {
// when the DB file doesn't exist, ignore the error because the following save will re-create the file
resolve();
}
});
});
}
async saveDBState() {
await new Promise((resolve, reject) => {
this.db.saveDatabase((err) => {
if (err) {
reject(err);
}
else {
resolve();
}
});
});
}
/**
* Creates the Service Properties collection if it does not exist
*
* @private
* @memberof LokiTableMetadataStore
*/
createServicePropsCollection() {
let servicePropertiesColl = this.db.getCollection(this.SERVICES_COLLECTION);
if (servicePropertiesColl === null) {
servicePropertiesColl = this.db.addCollection(this.SERVICES_COLLECTION, {
unique: ["accountName"]
});
}
}
/**
* Creates the tables collection if it does not exist
*
* @private
* @memberof LokiTableMetadataStore
*/
createTablesCollection() {
if (this.db.getCollection(this.TABLES_COLLECTION) === null) {
this.db.addCollection(this.TABLES_COLLECTION, {
// Optimization for indexing and searching
// https://rawgit.com/techfort/LokiJS/master/jsdoc/tutorial-Indexing%20and%20Query%20performance.html
indices: ["account", "table"]
});
}
}
/**
* Create a collection to represent the table using a unique string.
* This optimizes using an index for find operations.
*
* @private
* @param {Table} tableModel
* @memberof LokiTableMetadataStore
*/
createCollectionForTable(tableModel) {
const tableCollectionName = this.getTableCollectionName(tableModel.account, tableModel.table);
const extentColl = this.db.getCollection(tableCollectionName);
if (extentColl) {
this.db.removeCollection(tableCollectionName);
}
this.db.addCollection(tableCollectionName, {
// Optimization for indexing and searching
// https://rawgit.com/techfort/LokiJS/master/jsdoc/tutorial-Indexing%20and%20Query%20performance.html
indices: ["PartitionKey", "RowKey"]
});
}
/**
* Throws an exception if the table already exists
*
* @private
* @param {Collection<any>} coll
* @param {Table} tableModel
* @param {Context} context
* @memberof LokiTableMetadataStore
*/
checkIfTableExists(coll, tableModel, context) {
const doc = coll.findOne({
account: tableModel.account,
table: { $regex: [String.raw `\b${tableModel.table}\b`, "i"] }
});
// If the metadata exists, we will throw getTableAlreadyExists error
if (doc) {
throw StorageErrorFactory_1.default.getTableAlreadyExists(context);
}
}
/**
* Will throw a storage exception if the resource is not found.
*
* @private
* @param {*} doc
* @param {Context} context
* @memberof LokiTableMetadataStore
*/
checkIfResourceExists(doc, context) {
if (!doc) {
throw StorageErrorFactory_1.default.ResourceNotFound(context);
}
}
/**
* Removes a table collection and index when deleting a table.
*
* @private
* @param {string} account
* @param {*} doc
* @memberof LokiTableMetadataStore
*/
removeTableCollection(account, doc) {
const tableCollectionName = this.getTableCollectionName(account, doc.table);
const tableEntityCollection = this.db.getCollection(tableCollectionName);
if (tableEntityCollection) {
this.db.removeCollection(tableCollectionName);
}
}
/**
* Gets the collection of entities for a specific table.
* The table name lookup is case-insensitive.
*
* @private
* @param {string} account
* @param {string} table
* @param {Context} context
* @return {*} {Collection<any>}
* @memberof LokiTableMetadataStore
*/
getEntityCollection(account, table, context) {
let tableEntityCollection = this.db.getCollection(this.getTableCollectionName(account, table.toLowerCase()));
if (!tableEntityCollection) {
// this is to avoid a breaking change for users of persisted storage
tableEntityCollection = this.db.getCollection(this.getTableCollectionName(account, table));
if (!tableEntityCollection) {
throw StorageErrorFactory_1.default.getTableNotExist(context);
}
}
return tableEntityCollection;
}
trackRollback(batchId, doc) {
if (batchId !== "") {
this.transactionRollbackTheseEntities.push(doc);
}
}
checkIfMatchPrecondition(etag, doc, context) {
if (etag !== "*" && doc.eTag !== etag) {
throw StorageErrorFactory_1.default.getPreconditionFailed(context);
}
}
checkForMissingEntity(doc, context) {
if (!doc) {
throw StorageErrorFactory_1.default.getEntityNotFound(context);
}
}
getMaxResultsOption(queryOptions) {
if (undefined === queryOptions.top ||
null === queryOptions.top ||
constants_1.QUERY_RESULT_MAX_NUM < queryOptions.top) {
return constants_1.QUERY_RESULT_MAX_NUM;
}
return queryOptions.top;
}
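// Examples (illustrative): top undefined or above the cap returns QUERY_RESULT_MAX_NUM
// (mirroring the Azure Table service page limit of 1000 entities); top: 50 returns 50.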
/**
* Adjusts the query result for the max results specified in the top parameter.
* If an extra row was fetched, it is removed and its PartitionKey / RowKey are
* encoded and returned as continuation values.
*
* @private
* @param {any[]} result
* @param {number} maxResults
* @return {*} { nextPartitionKeyResponse, nextRowKeyResponse }
* @memberof LokiTableMetadataStore
*/
adjustQueryResultforTop(result, maxResults) {
let nextPartitionKeyResponse;
let nextRowKeyResponse;
if (result.length > maxResults) {
const tail = result.pop();
nextPartitionKeyResponse = this.encodeContinuationHeader(tail.PartitionKey);
nextRowKeyResponse = this.encodeContinuationHeader(tail.RowKey);
}
return { nextPartitionKeyResponse, nextRowKeyResponse };
}
decodeContinuationHeader(input) {
if (input !== undefined) {
return Buffer.from(input, "base64").toString("utf8");
}
}
encodeContinuationHeader(input) {
if (input !== undefined) {
return Buffer.from(input, "utf8").toString("base64");
}
}
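// Round-trip example (illustrative): continuation keys may contain non-ASCII characters
// that are not header-safe, hence the base64 encoding, e.g.
//
//   encodeContinuationHeader("Ärger")    // -> "w4RyZ2Vy"
//   decodeContinuationHeader("w4RyZ2Vy") // -> "Ärger"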
async updateTableEntity(context, table, account, entity, ifMatch, batchId) {
const tableEntityCollection = this.getEntityCollection(account, table, context);
const doc = tableEntityCollection.findOne({
PartitionKey: entity.PartitionKey,
RowKey: entity.RowKey
});
if (!doc) {
throw StorageErrorFactory_1.default.getEntityNotFound(context);
}
if (batchId !== "") {
this.transactionRollbackTheseEntities.push(doc);
}
// Test if etag value is valid
const encodedEtag = this.encodeIfMatch(doc.eTag);
let encodedIfMatch;
if (ifMatch !== undefined) {
encodedIfMatch = this.encodeIfMatch(ifMatch);
}
if (encodedIfMatch === undefined ||
encodedIfMatch === "*" ||
(encodedIfMatch !== undefined && encodedEtag === encodedIfMatch)) {
tableEntityCollection.remove(doc);
entity.properties.Timestamp = entity.lastModifiedTime;
entity.properties["Timestamp@odata.type"] = "Edm.DateTime";
tableEntityCollection.insert(entity);
return entity;
}
throw StorageErrorFactory_1.default.getPreconditionFailed(context);
}
async mergeTableEntity(context, table, account, entity, ifMatch, batchId) {
const tableEntityCollection = this.getEntityCollection(account, table, context);
const doc = tableEntityCollection.findOne({
PartitionKey: entity.PartitionKey,
RowKey: entity.RowKey
});
if (!doc) {
throw StorageErrorFactory_1.default.getEntityNotFound(context);
}
if (batchId !== "") {
this.transactionRollbackTheseEntities.push(doc);
}
// ifMatch may arrive URL-encoded from clients, so match the URL encoding of the stored etag;
// client behavior does not always seem to be consistent here.
const encodedEtag = this.encodeIfMatch(doc.eTag);
const encodedIfMatch = this.encodeIfMatch(ifMatch);
if (encodedIfMatch === undefined ||
encodedIfMatch === "*" ||
(encodedIfMatch !== undefined && encodedEtag === encodedIfMatch)) {
const mergedEntity = {
...doc,
...entity,
properties: {
...doc.properties
// ...entity.properties
}
};
// Merge inner properties
for (const key in entity.properties) {
if (Object.prototype.hasOwnProperty.call(entity.properties, key)) {
if (key.endsWith(constants_1.ODATA_TYPE)) {
continue;
}
const value = entity.properties[key];
mergedEntity.properties[key] = value;
this.filterOdataMetaData(entity, key, mergedEntity);
}
}
tableEntityCollection.update(mergedEntity);
return mergedEntity;
}
throw StorageErrorFactory_1.default.getPreconditionFailed(context);
}
filterOdataMetaData(entity, key, mergedEntity) {
if (entity.properties[`${key}${constants_1.ODATA_TYPE}`] !== undefined) {
mergedEntity.properties[`${key}${constants_1.ODATA_TYPE}`] =
entity.properties[`${key}${constants_1.ODATA_TYPE}`];
}
else {
delete mergedEntity.properties[`${key}${constants_1.ODATA_TYPE}`];
}
}
encodeIfMatch(ifMatch) {
let encodeIfMatch;
if (ifMatch !== undefined) {
encodeIfMatch = ifMatch.replace(":", "%3A").replace(":", "%3A");
}
return encodeIfMatch;
}
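// Example (illustrative etag value): only the first two ":" occurrences are replaced,
// which covers the time portion of a datetime-based etag:
//
//   encodeIfMatch("W/\"datetime'2024-01-01T10:30:00.000Z'\"")
//   // -> "W/\"datetime'2024-01-01T10%3A30%3A00.000Z'\""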
getTableCollectionName(account, table) {
return `${account}$${table}`;
}
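// e.g. getTableCollectionName("devstoreaccount1", "mytable") -> "devstoreaccount1$mytable",
// the per-table Loki collection name used throughout this store.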
/**
* Rolls back changes for deleted or modified entities
*
* @private
* @param {Collection<any>} tableBatchCollection
* @memberof LokiTableMetadataStore
*/
rollbackEntityChanges(tableBatchCollection) {
for (const entity of this.transactionRollbackTheseEntities) {
const copiedEntity = {
PartitionKey: entity.PartitionKey,
RowKey: entity.RowKey,
properties: entity.properties,
lastModifiedTime: entity.lastModifiedTime,
eTag: entity.eTag
};
// lokijs applies this insert as an upsert
const doc = tableBatchCollection.findOne({
PartitionKey: entity.PartitionKey,
RowKey: entity.RowKey
});
// we can't rely on upsert behavior if documents already exist
if (doc) {
tableBatchCollection.remove(doc);
}
tableBatchCollection.insert(copiedEntity);
}
}
/**
* Removes entities added to the batch collection
*
* @private
* @param {Collection<any>} tableBatchCollection
* @memberof LokiTableMetadataStore
*/
removeEntitiesAddedInBatch(tableBatchCollection) {
for (const deleteRow of this.transactionDeleteTheseEntities) {
tableBatchCollection.remove(deleteRow);
}
}
}
exports.default = LokiTableMetadataStore;
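// End-to-end usage sketch (hypothetical, for orientation only; in Azurite this store is
// driven by the generated table handlers, and a `context` object exists per request):
//
//   const store = new LokiTableMetadataStore("./azurite-table-db.json", false);
//   await store.init();
//   await store.createTable(context, { account: "devstoreaccount1", table: "MyTable" });
//   await store.insertTableEntity(context, "MyTable", "devstoreaccount1", entity, "");
//   const [rows] = await store.queryTableEntities(context, "devstoreaccount1", "MyTable", { top: 10 }, undefined, undefined);
//   await store.close();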
//# sourceMappingURL=LokiTableMetadataStore.js.map