// Azurite — an open source Azure Storage API compatible server.
// TableBatchOrchestrator (compiled JavaScript output, 520 lines • 23.9 kB).
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
const BatchRequest_1 = tslib_1.__importDefault(require("./BatchRequest"));
const BatchTableInsertEntityOptionalParams_1 = tslib_1.__importDefault(require("./BatchTableInsertEntityOptionalParams"));
const TableStorageContext_1 = tslib_1.__importDefault(require("../context/TableStorageContext"));
const TableBatchSerialization_1 = require("./TableBatchSerialization");
const BatchTableDeleteEntityOptionalParams_1 = tslib_1.__importDefault(require("./BatchTableDeleteEntityOptionalParams"));
const BatchTableUpdateEntityOptionalParams_1 = tslib_1.__importDefault(require("./BatchTableUpdateEntityOptionalParams"));
const BatchTableMergeEntityOptionalParams_1 = tslib_1.__importDefault(require("./BatchTableMergeEntityOptionalParams"));
const BatchTableQueryEntitiesWithPartitionAndRowKeyOptionalParams_1 = tslib_1.__importDefault(require("./BatchTableQueryEntitiesWithPartitionAndRowKeyOptionalParams"));
const uuid_1 = require("uuid");
const StorageErrorFactory_1 = tslib_1.__importDefault(require("../errors/StorageErrorFactory"));
const TableBatchRepository_1 = tslib_1.__importDefault(require("./TableBatchRepository"));
const BatchStringConstants_1 = tslib_1.__importDefault(require("./BatchStringConstants"));
const BatchErrorConstants_1 = tslib_1.__importDefault(require("./BatchErrorConstants"));
/**
* Currently there is a single distinct and concrete implementation of batch /
* entity group operations for the table api.
* The orchestrator manages the deserialization, submission and serialization of
* entity group transactions.
* ToDo: it might be possible to share code between this and the blob batch api, but there
* is relatively little commonality, due to the different ACL models and the fact that
* Azure Tables is owned by a different group to the Azure Blob Storage team.
*
* @export
* @class TableBatchOrchestrator
*/
class TableBatchOrchestrator {
    /**
     * Creates an orchestrator bound to the incoming request context, the
     * parent table handler (which executes the individual entity operations)
     * and the metadata store backing the batch repository.
     *
     * @param {*} context - TableStorageContext of the incoming batch request
     * @param {*} handler - parent table handler used to dispatch single operations
     * @param {*} metadataStore - store providing transactional rollback support
     */
    constructor(context, handler, metadataStore) {
        this.serialization = new TableBatchSerialization_1.TableBatchSerialization();
        this.wasError = false;
        this.errorResponse = "";
        // Map of (partitionKey + rowKey) -> partitionKey, used to detect
        // duplicate row keys within a single batch request.
        this.partitionKeyMap = new Map();
        this.context = context;
        this.parentHandler = handler;
        this.repository = new TableBatchRepository_1.default(metadataStore);
    }
    /**
     * This is the central route / sequence of the batch orchestration.
     * Takes batchRequest body, deserializes requests, submits to handlers, then returns serialized response
     *
     * @param {string} batchRequestBody
     * @return {*} {Promise<string>}
     * @memberof TableBatchManager
     */
    async processBatchRequestAndSerializeResponse(batchRequestBody) {
        const batchOperations = this.serialization.deserializeBatchRequest(batchRequestBody);
        // The Table service limits entity group transactions to 100 operations.
        if (batchOperations.length > 100) {
            this.wasError = true;
            this.errorResponse = this.serialization.serializeGeneralRequestError(BatchErrorConstants_1.default.TOO_MANY_OPERATIONS, this.context.xMsRequestID);
        }
        else {
            batchOperations.forEach((operation) => {
                const request = new BatchRequest_1.default(operation);
                this.repository.addBatchRequest(request);
            });
            await this.submitRequestsToHandlers();
        }
        return this.serializeResponses();
    }
    /**
     * Submits requests to the appropriate handlers
     * ToDo: Correct logic and handling of requests with Content ID
     *
     * @private
     * @return {*} {Promise<void>}
     * @memberof TableBatchManager
     */
    async submitRequestsToHandlers() {
        var _a;
        let contentID = 1;
        if (this.repository.getBatchRequests().length > 0) {
            // compiled form of `this.context.account ??= ""`
            const accountName = ((_a = this.context).account ?? (_a.account = ""));
            const tableName = this.repository.getBatchRequests()[0].getPath();
            const batchId = (0, uuid_1.v4)();
            this.checkForPartitionKey();
            // initialize transaction rollback capability
            await this.initTransaction(batchId);
            let batchSuccess = true;
            for (const singleReq of this.repository.getBatchRequests()) {
                try {
                    singleReq.response = await this.routeAndDispatchBatchRequest(singleReq, this.context, contentID, batchId);
                }
                catch (err) {
                    // first failing operation aborts the batch; the whole
                    // transaction is rolled back below via batchSuccess=false
                    batchSuccess = false;
                    this.wasError = true;
                    this.errorResponse = this.serialization.serializeError(err, contentID, singleReq);
                    break;
                }
                contentID++;
            }
            await this.repository.endBatchTransaction(accountName, tableName, batchId, this.context, batchSuccess);
        }
    }
    /**
     * Ensures that we have a partition key for the batch request
     *
     * @private
     * @memberof TableBatchOrchestrator
     */
    checkForPartitionKey() {
        const requestPartitionKey = this.extractRequestPartitionKey(this.repository.getBatchRequests()[0]);
        if (requestPartitionKey === undefined) {
            this.wasError = true;
            this.errorResponse = this.serialization.serializeGeneralRequestError(BatchErrorConstants_1.default.NO_PARTITION_KEY, this.context.xMsRequestID);
        }
    }
    /**
     * Initializes the transaction for the batch request in the metadata store
     *
     * @param {string} batchId
     * @memberof TableBatchOrchestrator
     */
    async initTransaction(batchId) {
        // strict equality: wasError is always a boolean
        if (this.wasError === false) {
            await this.repository.beginBatchTransaction(batchId);
        }
    }
    /**
     * Serializes responses from the table handler
     * see Link below for details of response format
     * tslint:disable-next-line: max-line-length
     * https://docs.microsoft.com/en-us/rest/api/storageservices/performing-entity-group-transactions#json-versions-2013-08-15-and-later-2
     *
     * @private
     * @return {*} {string}
     * @memberof TableBatchManager
     */
    serializeResponses() {
        let responseString = "";
        // based on research, a stringbuilder is only worth doing with 1000s of string ops
        // this can be optimized later if we get reports of slow batch operations
        const batchBoundary = this.serialization.batchBoundary.replace(BatchStringConstants_1.default.BATCH_REQ_BOUNDARY, BatchStringConstants_1.default.BATCH_RES_BOUNDARY);
        let changesetBoundary = this.serialization.changesetBoundary.replace(BatchStringConstants_1.default.CHANGESET_REQ_BOUNDARY, BatchStringConstants_1.default.CHANGESET_RES_BOUNDARY);
        responseString += batchBoundary + BatchStringConstants_1.default.CRLF;
        // (currently static header) ToDo: Validate if we need to correct headers via tests
        responseString = this.serializeContentTypeAndBoundary(responseString, changesetBoundary);
        const changesetBoundaryClose = BatchStringConstants_1.default.BOUNDARY_PREFIX +
            changesetBoundary +
            BatchStringConstants_1.default.BOUNDARY_CLOSE_SUFFIX;
        changesetBoundary =
            BatchStringConstants_1.default.BOUNDARY_PREFIX + changesetBoundary;
        if (this.wasError === false) {
            this.repository.getBatchRequests().forEach((request) => {
                responseString += changesetBoundary;
                responseString += request.response;
                responseString += BatchStringConstants_1.default.DoubleCRLF;
            });
        }
        else {
            // serialize the error
            responseString += changesetBoundary + BatchStringConstants_1.default.CRLF;
            // then headers
            responseString += BatchStringConstants_1.default.CONTENT_TYPE_HTTP;
            responseString += BatchStringConstants_1.default.TRANSFER_ENCODING_BINARY;
            responseString += BatchStringConstants_1.default.CRLF;
            // then HTTP/1.1 404 etc
            responseString += this.errorResponse;
        }
        responseString += changesetBoundaryClose;
        responseString +=
            batchBoundary + BatchStringConstants_1.default.BOUNDARY_CLOSE_SUFFIX;
        return responseString;
    }
    /**
     * Appends the multipart content-type header plus changeset boundary to the
     * response being built and returns the extended string.
     *
     * @private
     * @param {string} responseString
     * @param {string} changesetBoundary
     * @return {string}
     * @memberof TableBatchOrchestrator
     */
    serializeContentTypeAndBoundary(responseString, changesetBoundary) {
        responseString +=
            BatchStringConstants_1.default.CONTENT_TYPE_MULTIPART_AND_BOUNDARY +
                changesetBoundary +
                BatchStringConstants_1.default.DoubleCRLF;
        return responseString;
    }
    /**
     * Routes and dispatches single operations against the table handler and stores
     * the serialized result.
     *
     * @private
     * @param {BatchRequest} request
     * @param {Context} context
     * @param {number} contentID
     * @return {*} {Promise<any>}
     * @memberof TableBatchManager
     */
    async routeAndDispatchBatchRequest(request, context, contentID, batchId) {
        const batchContextClone = this.createBatchContextClone(context, request, batchId);
        let response;
        let __return;
        // we only use 5 HTTP Verbs to determine the table operation type
        switch (request.getMethod()) {
            case BatchStringConstants_1.default.VERB_POST:
                // INSERT: we are inserting an entity
                // POST https://myaccount.table.core.windows.net/mytable
                ({ __return, response } = await this.handleBatchInsert(request, response, batchContextClone, contentID, batchId));
                break;
            case BatchStringConstants_1.default.VERB_PUT:
                // UPDATE: we are updating an entity
                // PUT http://127.0.0.1:10002/devstoreaccount1/mytable(PartitionKey='myPartitionKey', RowKey='myRowKey')
                // INSERT OR REPLACE:
                // PUT https://myaccount.table.core.windows.net/mytable(PartitionKey='myPartitionKey', RowKey='myRowKey')
                ({ __return, response } = await this.handleBatchUpdate(request, response, batchContextClone, contentID, batchId));
                break;
            case BatchStringConstants_1.default.VERB_DELETE:
                // DELETE: we are deleting an entity
                // DELETE https://myaccount.table.core.windows.net/mytable(PartitionKey='myPartitionKey', RowKey='myRowKey')
                ({ __return, response } = await this.handleBatchDelete(request, response, batchContextClone, contentID, batchId));
                break;
            case BatchStringConstants_1.default.VERB_GET:
                // QUERY : we are querying / retrieving an entity
                // GET https://myaccount.table.core.windows.net/mytable(PartitionKey='<partition-key>',RowKey='<row-key>')?$select=<comma-separated-property-names>
                ({ __return, response } = await this.handleBatchQuery(request, response, batchContextClone, contentID, batchId));
                break;
            case BatchStringConstants_1.default.VERB_CONNECT:
                throw new Error("Connect Method unsupported in batch.");
            case BatchStringConstants_1.default.VERB_HEAD:
                throw new Error("Head Method unsupported in batch.");
            case BatchStringConstants_1.default.VERB_OPTIONS:
                throw new Error("Options Method unsupported in batch.");
            case BatchStringConstants_1.default.VERB_TRACE:
                throw new Error("Trace Method unsupported in batch.");
            case BatchStringConstants_1.default.VERB_PATCH:
                // this is using the PATCH verb to merge
                ({ __return, response } = await this.handleBatchMerge(request, response, batchContextClone, contentID, batchId));
                break;
            default:
                // MERGE: this must be the merge, as the merge operation is not currently generated by autorest
                // MERGE https://myaccount.table.core.windows.net/mytable(PartitionKey='myPartitionKey', RowKey='myRowKey')
                // INSERT OR MERGE
                // MERGE https://myaccount.table.core.windows.net/mytable(PartitionKey='myPartitionKey', RowKey='myRowKey')
                ({ __return, response } = await this.handleBatchMerge(request, response, batchContextClone, contentID, batchId));
        }
        return __return;
    }
    /**
     * Creates a clone of the context for the batch operation.
     * Because the context that we have will not work with the calls and needs
     * updating for batch operations.
     * We use a deep clone, as each request needs to be treated separately.
     *
     * @private
     * @param {Context} context
     * @param {BatchRequest} request
     * @return {*}
     * @memberof TableBatchOrchestrator
     */
    createBatchContextClone(context, request, batchId) {
        const batchContextClone = Object.create(context);
        batchContextClone.tableName = request.getPath();
        batchContextClone.path = request.getPath();
        const updatedContext = new TableStorageContext_1.default(batchContextClone);
        updatedContext.request = request;
        updatedContext.batchId = batchId;
        return updatedContext;
    }
    /**
     * Handles an insert operation inside a batch
     *
     * @private
     * @param {BatchRequest} request
     * @param {*} response
     * @param {*} batchContextClone
     * @param {number} contentID
     * @return {*} {Promise<{
     *     __return: string;
     *     response: any;
     *   }>}
     * @memberof TableBatchManager
     */
    async handleBatchInsert(request, response, batchContextClone, contentID, batchId) {
        request.ingestOptionalParams(new BatchTableInsertEntityOptionalParams_1.default());
        const { partitionKey, rowKey } = this.extractKeys(request);
        this.validateBatchRequest(partitionKey, rowKey, batchContextClone);
        response = await this.parentHandler.insertEntity(request.getPath(), request.params, batchContextClone);
        return {
            __return: this.serialization.serializeTableInsertEntityBatchResponse(request, response),
            response
        };
    }
    /**
     * Handles a delete Operation inside a batch request
     *
     * @private
     * @param {BatchRequest} request
     * @param {*} response
     * @param {*} batchContextClone
     * @param {number} contentID
     * @return {*} {Promise<{
     *     __return: string;
     *     response: any;
     *   }>}
     * @memberof TableBatchManager
     */
    async handleBatchDelete(request, response, batchContextClone, contentID, batchId) {
        request.ingestOptionalParams(new BatchTableDeleteEntityOptionalParams_1.default());
        // a missing If-Match header means unconditional delete
        const ifmatch = request.getHeader(BatchStringConstants_1.default.IF_MATCH_HEADER_STRING) ||
            BatchStringConstants_1.default.ASTERISK;
        const { partitionKey, rowKey } = this.extractKeys(request);
        this.validateBatchRequest(partitionKey, rowKey, batchContextClone);
        response = await this.parentHandler.deleteEntity(request.getPath(), partitionKey, rowKey, ifmatch, request.params, batchContextClone);
        return {
            __return: this.serialization.serializeTableDeleteEntityBatchResponse(request, response),
            response
        };
    }
    /**
     * Extracts partition and row key from a single batch request.
     *
     * @private
     * @param {BatchRequest} request
     * @return {{ partitionKey: string, rowKey: string }}
     * @memberof TableBatchOrchestrator
     */
    extractKeys(request) {
        const partitionKey = this.extractRequestPartitionKey(request);
        const rowKey = this.extractRequestRowKey(request);
        return { partitionKey, rowKey };
    }
    /**
     * Handles an update Operation inside a batch request
     *
     * @private
     * @param {BatchRequest} request
     * @param {*} response
     * @param {*} batchContextClone
     * @param {number} contentID
     * @return {*} {Promise<{
     *     __return: string;
     *     response: any;
     *   }>}
     * @memberof TableBatchManager
     */
    async handleBatchUpdate(request, response, batchContextClone, contentID, batchId) {
        request.ingestOptionalParams(new BatchTableUpdateEntityOptionalParams_1.default());
        const { partitionKey, rowKey } = this.extractKeys(request);
        this.validateBatchRequest(partitionKey, rowKey, batchContextClone);
        const ifMatch = request.getHeader(BatchStringConstants_1.default.IF_MATCH_HEADER_STRING);
        response = await this.parentHandler.updateEntity(request.getPath(), partitionKey, rowKey, {
            ifMatch,
            ...request.params
        }, batchContextClone);
        return {
            __return: this.serialization.serializeTableUpdateEntityBatchResponse(request, response),
            response
        };
    }
    /**
     * Handles a query operation inside a batch request,
     * should only ever be one operation if there is a query
     *
     * @private
     * @param {BatchRequest} request
     * @param {*} response
     * @param {*} batchContextClone
     * @param {number} contentID
     * @return {*} {Promise<{
     *     __return: string;
     *     response: any;
     *   }>}
     * @memberof TableBatchManager
     */
    async handleBatchQuery(request, response, batchContextClone, contentID, batchId) {
        // need to validate that query is the only request in the batch!
        const { partitionKey, rowKey } = this.extractKeys(request);
        if (null !== partitionKey &&
            null !== rowKey &&
            partitionKey !== "" &&
            rowKey !== "") {
            // ToDo: this is hideous... but we need the params on the request object,
            // as they percolate through and are needed for the final serialization
            // currently, because of the way we deconstruct / deserialize, we only
            // have the right model at a very late stage in processing
            // this might resolve when we simplify Query logic
            // based on only accepting Query with partition and row key
            request.ingestOptionalParams(new BatchTableQueryEntitiesWithPartitionAndRowKeyOptionalParams_1.default());
            response = await this.parentHandler.queryEntitiesWithPartitionAndRowKey(request.getPath(), partitionKey, rowKey, request.params, batchContextClone);
            return {
                __return: await this.serialization.serializeTableQueryEntityWithPartitionAndRowKeyBatchResponse(request, response),
                response
            };
        }
        else {
            throw StorageErrorFactory_1.default.getNotImplementedError(batchContextClone);
        }
    }
    /**
     * Handles a merge operation inside a batch request
     *
     * @private
     * @param {BatchRequest} request
     * @param {*} response
     * @param {*} batchContextClone
     * @param {number} contentID
     * @return {*} {Promise<{
     *     __return: string;
     *     response: any;
     *   }>}
     * @memberof TableBatchManager
     */
    async handleBatchMerge(request, response, batchContextClone, contentID, batchId) {
        request.ingestOptionalParams(new BatchTableMergeEntityOptionalParams_1.default());
        const { partitionKey, rowKey } = this.extractKeys(request);
        this.validateBatchRequest(partitionKey, rowKey, batchContextClone);
        response = await this.parentHandler.mergeEntity(request.getPath(), partitionKey, rowKey, {
            ifMatch: request.getHeader(BatchStringConstants_1.default.IF_MATCH_HEADER_STRING),
            ...request.params
        }, batchContextClone);
        return {
            __return: this.serialization.serializeTableMergeEntityBatchResponse(request, response),
            response
        };
    }
    /**
     * extracts the Partition key from a request
     *
     * @private
     * @param {BatchRequest} request
     * @return {*} {string}
     * @memberof TableBatchOrchestrator
     */
    extractRequestPartitionKey(request) {
        let partitionKey;
        const originalUrl = request.getUrl();
        // keys arriving via the URL may be URI encoded; decode before matching
        const url = decodeURIComponent(originalUrl);
        const partKeyMatch = url.match(/(?<=PartitionKey=')(.*)(?=',)/gi);
        if (partKeyMatch === null) {
            // partition key not in URL, must be in body
            const body = request.getBody();
            if (body !== "") {
                const jsonBody = JSON.parse(body ? body : "{}");
                partitionKey = jsonBody.PartitionKey;
            }
        }
        else {
            // keys can have more complex values which are URI encoded if they come from the URL
            // we decode above.
            partitionKey = partKeyMatch[0];
            // Url should use double ticks and we need to remove them
            partitionKey = this.replaceDoubleTicks(partitionKey);
        }
        return partitionKey;
    }
    /**
     * Helper function to extract values needed for handler calls
     *
     * @private
     * @param {BatchRequest} request
     * @return { string }
     * @memberof TableBatchManager
     */
    extractRequestRowKey(request) {
        let rowKey;
        // problem: sometimes the ticks are encoded, sometimes not!
        // this is a difference between Azure Data-Tables and the deprecated
        // Azure Storage SDK decode URI component will not remove double ticks
        const url = decodeURIComponent(request.getUrl());
        const rowKeyMatch = url.match(/(?<=RowKey=')(.+)(?='\))/gi);
        rowKey = rowKeyMatch ? rowKeyMatch[0] : "";
        // Url should use double ticks and we need to remove them
        rowKey = this.replaceDoubleTicks(rowKey);
        if (rowKeyMatch === null) {
            // row key not in URL, must be in body
            const body = request.getBody();
            if (body !== "") {
                const jsonBody = JSON.parse(body ? body : "{}");
                rowKey = jsonBody.RowKey;
            }
        }
        return rowKey;
    }
    /**
     * Replace Double ticks for single ticks without replaceAll string prototype
     * function, because node 14 does not support it.
     * @param key
     * @returns
     */
    replaceDoubleTicks(key) {
        const result = key.replace(/''/g, "'");
        return result;
    }
    /**
     * Helper function to validate batch requests.
     * Additional validation functions should be added here.
     *
     * @private
     * @param {string} partitionKey
     * @param {string} rowKey
     * @param {*} batchContextClone
     * @memberof TableBatchOrchestrator
     */
    validateBatchRequest(partitionKey, rowKey, batchContextClone) {
        if (partitionKey === undefined) {
            throw StorageErrorFactory_1.default.getInvalidInput(batchContextClone);
        }
        this.checkForDuplicateRowKey(partitionKey, rowKey, batchContextClone);
    }
    /**
     * Throws if the (partitionKey, rowKey) pair was already used by an earlier
     * operation in this batch; otherwise records it for subsequent checks.
     *
     * @private
     * @param {string} partitionKey
     * @param {string} rowKey
     * @param {*} batchContextClone
     * @memberof TableBatchOrchestrator
     */
    checkForDuplicateRowKey(partitionKey, rowKey, batchContextClone) {
        const key = partitionKey + rowKey;
        if (this.partitionKeyMap.has(key)) {
            throw StorageErrorFactory_1.default.getBatchDuplicateRowKey(batchContextClone, rowKey);
        }
        else {
            this.partitionKeyMap.set(key, partitionKey);
        }
    }
}
exports.default = TableBatchOrchestrator;
//# sourceMappingURL=TableBatchOrchestrator.js.map