azurite
An open source Azure Storage API compatible server
JavaScript
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
const to_readable_stream_1 = tslib_1.__importDefault(require("to-readable-stream"));
const BufferStream_1 = tslib_1.__importDefault(require("../../common/utils/BufferStream"));
const utils_1 = require("../utils/utils");
const TableBatchOrchestrator_1 = tslib_1.__importDefault(require("../batch/TableBatchOrchestrator"));
const TableBatchUtils_1 = tslib_1.__importDefault(require("../batch/TableBatchUtils"));
const TableStorageContext_1 = tslib_1.__importDefault(require("../context/TableStorageContext"));
const NormalizedEntity_1 = require("../entity/NormalizedEntity");
const StorageErrorFactory_1 = tslib_1.__importDefault(require("../errors/StorageErrorFactory"));
const constants_1 = require("../utils/constants");
const utils_2 = require("../utils/utils");
const BaseHandler_1 = tslib_1.__importDefault(require("./BaseHandler"));
const IEdmType_1 = require("../entity/IEdmType");
const utils_3 = require("../../common/utils/utils");
/**
* TODO:
* 1. Check Accept for every API
* 2. Check Prefer for every API
*/
class TableHandler extends BaseHandler_1.default {
async create(tableProperties, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const accept = this.getAndCheckPayloadFormat(tableContext);
const account = this.getAndCheckAccountName(tableContext);
// Table name is in request body instead of URL
const table = tableProperties.tableName;
if (table === undefined) {
throw StorageErrorFactory_1.default.getTableNameEmpty(context);
}
// validate table name
(0, utils_2.validateTableName)(context, table);
const tableModel = {
account,
table
};
await this.metadataStore.createTable(context, tableModel);
const response = {
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
statusCode: 201
};
response.tableName = table;
(0, utils_2.updateTableOptionalOdataAnnotationsForResponse)(response, account, table, this.getOdataAnnotationUrlPrefix(tableContext, account), accept);
this.updateResponsePrefer(response, tableContext);
this.updateResponseAccept(tableContext, accept);
return response;
}
async delete(_table, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const account = this.getAndCheckAccountName(tableContext);
const table = this.getAndCheckTableName(tableContext);
const accept = this.getAndCheckPayloadFormat(tableContext);
await this.metadataStore.deleteTable(context, table, account);
const response = {
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
statusCode: 204
};
this.updateResponseAccept(tableContext, accept);
return response;
}
async query(options, context) {
const tableContext = new TableStorageContext_1.default(context);
const account = this.getAndCheckAccountName(tableContext);
const accept = this.getAndCheckPayloadFormat(tableContext);
const [tableResult, nextTableName] = await this.metadataStore.queryTable(context, account, options.queryOptions || {}, options.nextTableName);
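// queryTable returns one page of results plus an opaque continuation marker;
// when nextTableName is set it is returned to the client in the
// x-ms-continuation-NextTableName response header so the next page can be requested.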
const response = {
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
statusCode: 200,
xMsContinuationNextTableName: nextTableName,
value: []
};
const prefix = this.getOdataAnnotationUrlPrefix(tableContext, account);
const annotation = (0, utils_2.getTableOdataAnnotationsForResponse)(account, "", prefix);
if (accept === constants_1.MINIMAL_METADATA_ACCEPT || accept === constants_1.FULL_METADATA_ACCEPT) {
response.odatametadata = annotation.odatametadata;
}
response.value = tableResult.map((item) => (0, utils_2.getTablePropertiesOdataAnnotationsForResponse)(item.table, account, prefix, accept));
this.updateResponseAccept(tableContext, accept);
return response;
}
// TODO: Filter odata types per accept settings
async insertEntity(_tableName, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const account = this.getAndCheckAccountName(tableContext);
const table = this.getAndCheckTableName(tableContext);
const accept = this.getAndCheckPayloadFormat(tableContext);
const prefer = this.getAndCheckPreferHeader(tableContext);
this.checkBodyLimit(context, context.request?.getBody());
// currently unable to use checking functions as the partitionKey
// and rowKey are not coming through the context.
// const partitionKey = this.getAndCheckPartitionKey(tableContext);
// const rowKey = this.getAndCheckRowKey(tableContext);
if (!options.tableEntityProperties ||
// PartitionKey and RowKey may be empty strings, but must be present
options.tableEntityProperties.PartitionKey == null ||
options.tableEntityProperties.RowKey == null) {
throw StorageErrorFactory_1.default.getPropertiesNeedValue(context);
}
// check that key properties are valid
this.validateKey(context, options.tableEntityProperties.PartitionKey);
this.validateKey(context, options.tableEntityProperties.RowKey);
this.checkProperties(context, options.tableEntityProperties);
// need to remove the etags from the properties to avoid errors
// https://docs.microsoft.com/en-us/rest/api/storageservices/insert-entity
options.tableEntityProperties = this.removeEtagProperty(options.tableEntityProperties);
const entity = this.createPersistedEntity(context, options, options.tableEntityProperties?.PartitionKey, options.tableEntityProperties?.RowKey);
let normalizedEntity;
try {
normalizedEntity = new NormalizedEntity_1.NormalizedEntity(entity);
normalizedEntity.normalize();
}
catch (e) {
this.logger.error(`TableHandler:insertEntity() ${e.name} ${JSON.stringify(e.stack)}`, context.contextID);
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
await this.metadataStore.insertTableEntity(context, table, account, entity, tableContext.batchId);
const response = {
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
statusCode: 201,
eTag: entity.eTag
};
if (prefer === constants_1.RETURN_CONTENT || prefer === undefined) {
const body = {};
const annotation = (0, utils_2.getEntityOdataAnnotationsForResponse)(account, table, this.getOdataAnnotationUrlPrefix(tableContext, account), options.tableEntityProperties?.PartitionKey, options.tableEntityProperties?.RowKey, accept);
if (accept === constants_1.MINIMAL_METADATA_ACCEPT) {
body["odata.metadata"] = annotation.odatametadata;
body["odata.etag"] = entity.eTag;
}
if (accept === constants_1.FULL_METADATA_ACCEPT) {
body["odata.metadata"] = annotation.odatametadata;
body["odata.type"] = annotation.odatatype;
body["odata.id"] = annotation.odataid;
body["odata.etag"] = entity.eTag;
body["odata.editLink"] = annotation.odataeditLink;
}
// for (const key of Object.keys(entity.properties)) {
// body[key] = entity.properties[key];
// }
// response.body = new BufferStream(Buffer.from(JSON.stringify(body)));
const rawResponse = normalizedEntity.toResponseString(accept, body);
this.logger.debug(`TableHandler:insertEntity() Raw response string is ${JSON.stringify(rawResponse)}`, context.contextID);
response.body = new BufferStream_1.default(Buffer.from(rawResponse));
}
this.updateResponseAccept(tableContext, accept);
this.updateResponsePrefer(response, tableContext);
return response;
}
createPersistedEntity(context, options, partitionKey, rowKey) {
const modTime = (0, utils_3.truncatedISO8061Date)(context.startTime, true, true);
const eTag = (0, utils_1.newTableEntityEtag)(modTime);
const entity = {
PartitionKey: partitionKey,
RowKey: rowKey,
properties: options.tableEntityProperties === undefined
? {}
: options.tableEntityProperties,
lastModifiedTime: modTime,
eTag
};
return entity;
}
static getAndCheck(key, getFromContext, contextForThrow) {
if (key !== undefined) {
return key;
}
const fromContext = getFromContext();
if (fromContext === undefined) {
throw StorageErrorFactory_1.default.getPropertiesNeedValue(contextForThrow);
}
return fromContext;
}
static getAndCheckKeys(partitionKey, rowKey, tableContext, contextForThrow) {
partitionKey = TableHandler.getAndCheck(partitionKey, () => tableContext.partitionKey, contextForThrow);
rowKey = TableHandler.getAndCheck(rowKey, () => tableContext.rowKey, contextForThrow);
return [partitionKey, rowKey];
}
// TODO: Create data structures to hold entity properties and support serialize, merge, deserialize, filter
// Note: batch requests pass the partition key and row key as arguments; otherwise the
// handler receives these values from middleware via the context
async updateEntity(_table, partitionKey, rowKey, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const account = this.getAndCheckAccountName(tableContext);
const table = this.getAndCheckTableName(tableContext);
this.checkEntityLimit(context, context.request?.getBody());
[partitionKey, rowKey] = TableHandler.getAndCheckKeys(partitionKey, rowKey, tableContext, context);
const ifMatch = options.ifMatch;
if (!options.tableEntityProperties) {
throw StorageErrorFactory_1.default.getPropertiesNeedValue(context);
}
if (options.tableEntityProperties.PartitionKey !== partitionKey ||
options.tableEntityProperties.RowKey !== rowKey) {
this.logger.warn(`TableHandler:updateEntity() Incoming PartitionKey:${partitionKey} RowKey:${rowKey} in URL parameters don't align with entity body PartitionKey:${options.tableEntityProperties.PartitionKey} RowKey:${options.tableEntityProperties.RowKey}.`);
}
this.checkProperties(context, options.tableEntityProperties);
// Test whether an etag was supplied:
// a missing If-Match header makes this an upsert, while an empty header is an error.
// https://docs.microsoft.com/en-us/rest/api/storageservices/insert-or-replace-entity
if (ifMatch === "") {
throw StorageErrorFactory_1.default.getPreconditionFailed(context);
}
if (options?.ifMatch && options.ifMatch !== "*") {
if ((0, utils_1.isEtagValid)(options.ifMatch)) {
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
}
// check that key properties are valid
this.validateKey(context, partitionKey);
this.validateKey(context, rowKey);
options.tableEntityProperties = this.removeEtagProperty(options.tableEntityProperties);
const entity = this.createPersistedEntity(context, options, partitionKey, rowKey);
let normalizedEntity;
try {
normalizedEntity = new NormalizedEntity_1.NormalizedEntity(entity);
normalizedEntity.normalize();
}
catch (e) {
this.logger.error(`TableHandler:updateEntity() ${e.name} ${JSON.stringify(e.stack)}`, context.contextID);
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
await this.metadataStore.insertOrUpdateTableEntity(context, table, account, entity, ifMatch, tableContext.batchId);
// Response definition
const response = {
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
eTag: entity.eTag,
statusCode: 204
};
return response;
}
async mergeEntity(_table, partitionKey, rowKey, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const account = this.getAndCheckAccountName(tableContext);
const table = this.getAndCheckTableName(tableContext);
this.checkEntityLimit(context, context.request?.getBody());
[partitionKey, rowKey] = TableHandler.getAndCheckKeys(partitionKey, rowKey, tableContext, context);
this.checkMergeRequest(options, context, partitionKey, rowKey);
options.tableEntityProperties = this.removeEtagProperty(options.tableEntityProperties);
const entity = this.createPersistedEntity(context, options, partitionKey, rowKey);
let normalizedEntity;
try {
normalizedEntity = new NormalizedEntity_1.NormalizedEntity(entity);
normalizedEntity.normalize();
}
catch (e) {
this.logger.error(`TableHandler:mergeEntity() ${e.name} ${JSON.stringify(e.stack)}`, context.contextID);
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
await this.metadataStore.insertOrMergeTableEntity(context, table, account, entity, options.ifMatch, tableContext.batchId);
const response = {
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
statusCode: 204,
eTag: entity.eTag
};
return response;
}
/**
* Check that the properties are valid on a merge request
*
* @private
* @param {Models.TableMergeEntityOptionalParams} options
* @param {Context} context
* @param {string} partitionKey
* @param {string} rowKey
* @memberof TableHandler
*/
checkMergeRequest(options, context, partitionKey, rowKey) {
// Some SDKs, such as the Azure Cosmos Table SDK, do not always send properties,
// so a merge may carry only the partition and row keys, like an upsert.
// The strict check below caused issues and has been removed for now.
// if (!options.tableEntityProperties) {
// throw StorageErrorFactory.getPropertiesNeedValue(context);
// }
if (options.tableEntityProperties !== undefined) {
if (options.tableEntityProperties.PartitionKey !== partitionKey ||
options.tableEntityProperties.RowKey !== rowKey) {
this.logger.warn(`TableHandler:mergeEntity() Incoming PartitionKey:${partitionKey} RowKey:${rowKey} in URL parameters don't align with entity body PartitionKey:${options.tableEntityProperties.PartitionKey} RowKey:${options.tableEntityProperties.RowKey}.`);
}
this.checkProperties(context, options.tableEntityProperties);
}
this.checkMergeIfMatch(options, context);
// check that key properties are valid
this.validateKey(context, partitionKey);
this.validateKey(context, rowKey);
}
/**
* Check that the ifMatch header is valid on a merge request
*
* @private
* @param {Models.TableMergeEntityOptionalParams} options
* @param {Context} context
* @memberof TableHandler
*/
checkMergeIfMatch(options, context) {
if (options?.ifMatch && options.ifMatch !== "*" && options.ifMatch !== "") {
if ((0, utils_1.isEtagValid)(options.ifMatch)) {
throw StorageErrorFactory_1.default.getInvalidOperation(context);
}
}
}
async deleteEntity(_table, partitionKey, rowKey, ifMatch, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const accountName = tableContext.account;
[partitionKey, rowKey] = TableHandler.getAndCheckKeys(partitionKey, rowKey, tableContext, context);
if (ifMatch === "" || ifMatch === undefined) {
throw StorageErrorFactory_1.default.getPreconditionFailed(context);
}
if (ifMatch !== "*" && (0, utils_1.isEtagValid)(ifMatch)) {
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
// currently the props are not coming through as args, so we take them from the table context
await this.metadataStore.deleteTableEntity(context, tableContext.tableName, accountName, partitionKey, rowKey, ifMatch, tableContext.batchId);
return {
statusCode: 204,
date: tableContext.startTime,
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION
};
}
async queryEntities(_table, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const table = this.getAndCheckTableName(tableContext);
const account = this.getAndCheckAccountName(tableContext);
const accept = this.getAndCheckPayloadFormat(tableContext);
this.checkBodyLimit(context, context.request?.getBody());
const [result, nextPartitionKey, nextRowKey] = await this.metadataStore.queryTableEntities(context, account, table, options.queryOptions || {}, options.nextPartitionKey, options.nextRowKey);
const response = {
clientRequestId: options.requestId,
requestId: tableContext.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
xMsContinuationNextPartitionKey: nextPartitionKey,
xMsContinuationNextRowKey: nextRowKey,
statusCode: 200
};
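// $select arrives as a comma separated list of property names; the requested names are
// collected into a Set so serialization below can filter properties with O(1) lookups.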
let selectSet;
const selectArray = options.queryOptions?.select
?.split(",")
.filter((item) => {
return typeof item === "string" && item.length > 0;
})
.map((item) => item.trim());
if (selectArray && selectArray.length > 0) {
selectSet = new Set(selectArray);
}
const entities = [];
const odataPrefix = this.getOdataAnnotationUrlPrefix(tableContext, account);
result.forEach((element) => {
const entity = {};
const annotation = (0, utils_2.getEntityOdataAnnotationsForResponse)(account, table, odataPrefix, element.PartitionKey, element.RowKey, accept);
if (accept === constants_1.MINIMAL_METADATA_ACCEPT ||
accept === constants_1.FULL_METADATA_ACCEPT) {
entity["odata.etag"] = element.eTag;
}
if (accept === constants_1.FULL_METADATA_ACCEPT) {
entity["odata.type"] = annotation.odatatype;
entity["odata.id"] = annotation.odataid;
entity["odata.editLink"] = annotation.odataeditLink;
}
const normalizedEntity = new NormalizedEntity_1.NormalizedEntity(element);
entities.push(normalizedEntity.toResponseString(accept, entity, selectSet));
});
const odatametadata = (0, utils_2.getEntityOdataAnnotationsForResponse)(account, table, odataPrefix, "", "", accept).odatametadata;
const odatametadataPairString = odatametadata
? `"odata.metadata":${JSON.stringify(odatametadata)},`
: "";
const body = `{${odatametadataPairString}"value":[${entities.join(",")}]}`;
response.body = new BufferStream_1.default(Buffer.from(body));
this.logger.debug(`TableHandler:queryEntities() Raw response string is ${JSON.stringify(body)}`, context.contextID);
this.updateResponseAccept(tableContext, accept);
return response;
}
async queryEntitiesWithPartitionAndRowKey(_table, partitionKey, rowKey, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const account = this.getAndCheckAccountName(tableContext);
const table = _table ? _table : this.getAndCheckTableName(tableContext);
[partitionKey, rowKey] = TableHandler.getAndCheckKeys(partitionKey, rowKey, tableContext, context);
const accept = this.getAndCheckPayloadFormat(tableContext);
const entity = await this.metadataStore.queryTableEntitiesWithPartitionAndRowKey(context, table, account, partitionKey, rowKey, tableContext.batchId);
if (entity === undefined || entity === null) {
throw StorageErrorFactory_1.default.getEntityNotFound(context);
}
const response = {
statusCode: 200,
date: tableContext.startTime,
clientRequestId: options.requestId,
requestId: context.contextID,
eTag: entity.eTag,
version: constants_1.TABLE_API_VERSION
};
const body = {};
const annotation = (0, utils_2.getEntityOdataAnnotationsForResponse)(account, table, this.getOdataAnnotationUrlPrefix(tableContext, account), partitionKey, rowKey, accept);
if (accept === constants_1.MINIMAL_METADATA_ACCEPT) {
body["odata.metadata"] = annotation.odatametadata;
body["odata.etag"] = entity.eTag;
}
if (accept === constants_1.FULL_METADATA_ACCEPT) {
body["odata.metadata"] = annotation.odatametadata;
body["odata.type"] = annotation.odatatype;
body["odata.id"] = annotation.odataid;
body["odata.etag"] = entity.eTag;
body["odata.editLink"] = annotation.odataeditLink;
}
let selectSet;
const selectArray = options.queryOptions?.select
?.split(",")
.filter((item) => {
return typeof item === "string" && item.length > 0;
})
.map((item) => item.trim());
if (selectArray && selectArray.length > 0) {
selectSet = new Set(selectArray);
}
const normalizedEntity = new NormalizedEntity_1.NormalizedEntity(entity);
const rawResponse = normalizedEntity.toResponseString(accept, body, selectSet);
response.body = new BufferStream_1.default(Buffer.from(rawResponse));
this.logger.debug(`TableHandler:queryEntitiesWithPartitionAndRowKey() Raw response string is ${JSON.stringify(rawResponse)}`, context.contextID);
this.updateResponseAccept(tableContext, accept);
return response;
}
async mergeEntityWithMerge(table, partitionKey, rowKey, options, context) {
return this.mergeEntity(table, partitionKey, rowKey, options, context);
}
/**
* Get table access policies.
* @param {string} table
* @param {Models.TableGetAccessPolicyOptionalParams} options
* @param {Context} context
* @returns {Promise<Models.TableGetAccessPolicyResponse>}
* @memberof TableHandler
*/
async getAccessPolicy(table, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const accountName = this.getAndCheckAccountName(tableContext);
const tableName = this.getAndCheckTableName(tableContext);
const foundTable = await this.metadataStore.getTable(accountName, tableName, context);
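// The response type is an array of signed identifiers that also carries the response
// metadata (date, requestId, version, etc.) as extra properties, so the same object is
// referenced both as an array and as a plain object below.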
const response = [];
const responseArray = response;
const responseObject = response;
if (foundTable.tableAcl !== undefined) {
responseArray.push(...foundTable.tableAcl);
}
responseObject.date = context.startTime;
responseObject.requestId = context.contextID;
responseObject.version = constants_1.TABLE_API_VERSION;
responseObject.statusCode = 200;
responseObject.clientRequestId = options.requestId;
return response;
}
/**
* Set table access policies.
* @param {string} table
* @param {Models.TableSetAccessPolicyOptionalParams} options
* @param {Context} context
* @returns {Promise<Models.TableSetAccessPolicyResponse>}
* @memberof TableHandler
*/
async setAccessPolicy(table, options, context) {
const tableContext = new TableStorageContext_1.default(context);
const accountName = this.getAndCheckAccountName(tableContext);
const tableName = this.getAndCheckTableName(tableContext);
this.checkBodyLimit(context, context.request?.getBody());
// A table may have at most 5 stored access policies, and each permission character
// must be a valid Table service permission.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas.
if (options.tableAcl !== undefined) {
if (options.tableAcl.length > 5) {
throw StorageErrorFactory_1.default.getInvalidXmlDocument(context);
}
for (const acl of options.tableAcl) {
const permission = acl.accessPolicy.permission;
for (const item of permission) {
if (!constants_1.TABLE_SERVICE_PERMISSION.includes(item)) {
throw StorageErrorFactory_1.default.getInvalidXmlDocument(context);
}
}
}
}
await this.metadataStore.setTableACL(accountName, tableName, context, options.tableAcl);
const response = {
date: context.startTime,
requestId: context.contextID,
version: constants_1.TABLE_API_VERSION,
statusCode: 204,
clientRequestId: options.requestId
};
return response;
}
/**
* Processes an entity group transaction request / batch request
*
* @param {NodeJS.ReadableStream} body
* @param {string} multipartContentType
* @param {number} contentLength
* @param {Models.TableBatchOptionalParams} options
* @param {Context} context
* @return {*} {Promise<Models.TableBatchResponse>}
* @memberof TableHandler
*/
async batch(body, multipartContentType, contentLength, options, context) {
const tableCtx = new TableStorageContext_1.default(context);
if (contentLength && contentLength > constants_1.BODY_SIZE_MAX) {
throw StorageErrorFactory_1.default.getRequestBodyTooLarge(context);
}
else {
this.checkBodyLimit(context, context.request?.getBody());
}
const contentTypeResponse = tableCtx.request
?.getHeader("content-type")
?.replace("batch", "batchresponse");
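// For illustration: a request content type such as
// "multipart/mixed; boundary=batch_<GUID>" is echoed back as
// "multipart/mixed; boundary=batchresponse_<GUID>".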
const tableBatchManager = new TableBatchOrchestrator_1.default(tableCtx, this, this.metadataStore);
const requestBody = await TableBatchUtils_1.default.StreamToString(body);
this.logger.debug(`TableHandler:batch() Raw request string is ${JSON.stringify(requestBody)}`, context.contextID);
const response = await tableBatchManager.processBatchRequestAndSerializeResponse(requestBody);
this.logger.debug(`TableHandler:batch() Raw response string is ${JSON.stringify(response)}`, context.contextID);
// need to convert response to NodeJS.ReadableStream
body = (0, to_readable_stream_1.default)(response);
return {
contentType: contentTypeResponse,
requestId: tableCtx.contextID,
version: constants_1.TABLE_API_VERSION,
date: context.startTime,
statusCode: 202,
body
};
}
getOdataAnnotationUrlPrefix(tableContext, account) {
// TODO: Get protocol, host and port from Azurite server instance
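// With the default emulator settings this prefix resolves to something like
// "http://127.0.0.1:10002/devstoreaccount1" (illustrative values).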
let protocol = "http";
let host = `${constants_1.DEFAULT_TABLE_SERVER_HOST_NAME}:${constants_1.DEFAULT_TABLE_LISTENING_PORT}/${account}`;
if (tableContext.request !== undefined) {
const hostHeader = tableContext.request.getHeader("host");
// a template literal is always truthy, so fall back to the default host explicitly
host = hostHeader !== undefined ? `${hostHeader}/${account}` : host;
protocol = tableContext.request.getProtocol();
}
return `${protocol}://${host}`;
}
getAndCheckPayloadFormat(context) {
const format = (0, utils_2.getPayloadFormat)(context);
if (format !== constants_1.NO_METADATA_ACCEPT &&
format !== constants_1.MINIMAL_METADATA_ACCEPT &&
format !== constants_1.FULL_METADATA_ACCEPT) {
throw StorageErrorFactory_1.default.getAtomFormatNotSupported(context);
}
return format;
}
getAndCheckPreferHeader(context) {
const prefer = context.request.getHeader(constants_1.HeaderConstants.PREFER);
return prefer;
}
getAndCheckAccountName(context) {
const account = context.account;
if (account === undefined) {
throw StorageErrorFactory_1.default.getAccountNameEmpty(context);
}
return account;
}
getAndCheckTableName(context) {
const table = context.tableName;
if (table === undefined) {
throw StorageErrorFactory_1.default.getTableNameEmpty(context);
}
return table;
}
updateResponseAccept(context, accept) {
if (accept !== undefined) {
context.response.setContentType(accept);
}
return context;
}
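// Applies the Prefer header semantics: "return-no-content" yields a 204 with no entity
// body, while "return-content" (or no Prefer header) yields a 201 that echoes the entity.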
updateResponsePrefer(response, context) {
const prefer = context.request.getHeader(constants_1.HeaderConstants.PREFER);
if (prefer === constants_1.RETURN_NO_CONTENT) {
response.statusCode = 204;
response.preferenceApplied = constants_1.RETURN_NO_CONTENT;
}
if (prefer === constants_1.RETURN_CONTENT || prefer === undefined) {
response.statusCode = 201;
response.preferenceApplied = constants_1.RETURN_CONTENT;
}
return response;
}
/**
* Checks if a key is valid based on the rules outlined here:
* https://docs.microsoft.com/en-us/rest/api/storageservices/Understanding-the-Table-Service-Data-Model#characters-disallowed-in-key-fields
* Checks that the key length does not exceed the configured maximum
* (the service allows keys up to 1 KiB, i.e. 1024 characters)
* Checks for invalid characters
* @private
* @param {Context} context
* @param {string} key
* @memberof TableHandler
*/
validateKey(context, key) {
// The service allows a key to be up to 1 KiB in size; somewhat arbitrarily,
// Azurite enforces a shorter maximum for performance reasons.
if (key !== undefined && key.length > constants_1.DEFAULT_KEY_MAX_LENGTH) {
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
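// The regex below matches the characters the service disallows in key fields:
// forward slash, backslash, number sign, question mark, and the control
// characters U+0000-U+001F and U+007F-U+009F.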
const match = key.match(/[\u0000-\u001f\u007f-\u009f\/\\\#\?]+/);
if (match !== null && match.length > 0) {
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
}
/**
* Checks that properties are valid according to rules given here:
* https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-the-table-service-data-model#property-types
*
* @private
* @param {Context} context
* @param {{
* [propertyName: string]: any;
* }} properties
* @memberof TableHandler
*/
checkProperties(context, properties) {
for (const prop in properties) {
if (properties.hasOwnProperty(prop)) {
if (properties[prop] !== null &&
properties[prop] !== undefined &&
properties[prop].length !== undefined) {
const typeKey = `${prop}${constants_1.ODATA_TYPE}`;
let type;
if (properties[typeKey]) {
type = (0, IEdmType_1.getEdmType)(properties[typeKey]);
}
if (type === IEdmType_1.EdmType.Binary) {
if (Buffer.from(properties[prop], "base64").length > 64 * 1024) {
throw StorageErrorFactory_1.default.getPropertyValueTooLargeError(context);
}
}
else if (properties[prop].length > 32 * 1024) {
throw StorageErrorFactory_1.default.getPropertyValueTooLargeError(context);
}
else if (properties[prop] === undefined ||
properties[prop] === "") {
const propertyType = properties[`${prop}${constants_1.ODATA_TYPE}`];
if (propertyType !== undefined && propertyType === "Edm.DateTime") {
throw StorageErrorFactory_1.default.getInvalidInput(context);
}
}
}
}
}
}
/**
* Checks the size of the body against service limit as per documentation
* https://docs.microsoft.com/en-us/troubleshoot/azure/general/request-body-large
*
* @private
* @param {Context} context
* @param {string} body
* @memberof TableHandler
*/
checkBodyLimit(context, body) {
if (undefined !== body && (0, utils_1.getUTF8ByteSize)(body) > constants_1.BODY_SIZE_MAX) {
throw StorageErrorFactory_1.default.getRequestBodyTooLarge(context);
}
}
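// checkEntityLimit mirrors checkBodyLimit but applies the per-entity size limit
// (ENTITY_SIZE_MAX, which the service documents as 1 MiB per entity).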
checkEntityLimit(context, body) {
if (undefined !== body && (0, utils_1.getUTF8ByteSize)(body) > constants_1.ENTITY_SIZE_MAX) {
throw StorageErrorFactory_1.default.getEntityTooLarge(context);
}
}
/**
* remove the etag property to avoid duplicate odata.etag error
*
* @private
* @param {{
* [propertyName: string]: any;
* }} tableEntityProperties
* @return {*} {({ [propertyName: string]: any } | undefined)}
* @memberof TableHandler
*/
removeEtagProperty(tableEntityProperties) {
if (tableEntityProperties) {
delete tableEntityProperties["odata.etag"];
}
return tableEntityProperties;
}
}
exports.default = TableHandler;
//# sourceMappingURL=TableHandler.js.map