// @azure/cosmos — Microsoft Azure Cosmos DB Service Node.js SDK for NOSQL API
// (bundled CommonJS build output; 912 lines / 37.5 kB, JavaScript)
// esbuild-style CommonJS/ESM interop helpers: exports are exposed as live
// getter bindings, and the namespace object is flagged `__esModule`.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define every entry of `all` on `target` as an enumerable getter so the
// exported values stay live bindings rather than copied snapshots.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties of `from` onto `to` as getters, skipping `except` and
// any key `to` already defines; enumerability mirrors the source descriptor
// (defaulting to enumerable when no descriptor is found).
var __copyProps = (to, from, except, desc) => {
  if (from && (typeof from === "object" || typeof from === "function")) {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) continue;
      desc = __getOwnPropDesc(from, key);
      __defProp(to, key, {
        get: () => from[key],
        enumerable: !desc || desc.enumerable
      });
    }
  }
  return to;
};
// Wrap a module object as a CommonJS export namespace marked `__esModule`.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// Build the CommonJS export surface: `Items` is registered as a live getter
// on the namespace object, which is then marked `__esModule` for interop.
var Items_exports = {};
__export(Items_exports, {
Items: () => Items
});
module.exports = __toCommonJS(Items_exports);
var import_ChangeFeedIterator = require("../../ChangeFeedIterator.js");
var import_common = require("../../common/index.js");
var import_extractPartitionKey = require("../../extractPartitionKey.js");
var import_queryIterator = require("../../queryIterator.js");
var import_Item = require("./Item.js");
var import_ItemResponse = require("./ItemResponse.js");
var import_batch = require("../../utils/batch.js");
var import_typeChecks = require("../../utils/typeChecks.js");
var import_hash = require("../../utils/hashing/hash.js");
var import_routing = require("../../routing/index.js");
var import_documents = require("../../documents/index.js");
var import_changeFeedUtils = require("../../client/ChangeFeed/changeFeedUtils.js");
var import_DiagnosticNodeInternal = require("../../diagnostics/DiagnosticNodeInternal.js");
var import_diagnostics = require("../../utils/diagnostics.js");
var import_core_util = require("@azure/core-util");
var import_ClientUtils = require("../ClientUtils.js");
var import_ChangeFeedIteratorBuilder = require("../ChangeFeed/ChangeFeedIteratorBuilder.js");
var import_TypeMarker = require("../../encryption/enums/TypeMarker.js");
var import_EncryptionItemQueryIterator = require("../../encryption/EncryptionItemQueryIterator.js");
var import_request = require("../../request/index.js");
var import_BulkHelper = require("../../bulk/BulkHelper.js");
// Distinguishes a ChangeFeedOptions bag from a partition key value: anything
// truthy that is neither a primitive partition key nor an array is treated as
// an options object. Falsy inputs are returned as-is (matching `a && b`).
function isChangeFeedOptions(options) {
  if (!options) {
    return options;
  }
  const looksLikePartitionKey = (0, import_typeChecks.isPrimitivePartitionKeyValue)(options) || Array.isArray(options);
  return !looksLikePartitionKey;
}
class Items {
/**
 * Create an instance of {@link Items} linked to the parent {@link Container}.
 * @param container - The parent container.
 * @param clientContext - Client context used to issue requests; also supplies
 * the shared partition key range cache.
 * @hidden
 */
constructor(container, clientContext) {
this.container = container;
this.clientContext = clientContext;
this.partitionKeyRangeCache = this.clientContext.partitionKeyRangeCache;
}
// Cache of partition key ranges (shared with the client context); used to
// route query, bulk, and batch requests to the correct physical partition.
partitionKeyRangeCache;
query(query, options = {}) {
const path = (0, import_common.getPathFromLink)(this.container.url, import_common.ResourceType.item);
const id = (0, import_common.getIdFromLink)(this.container.url);
const fetchFunction = async (diagnosticNode, innerOptions, correlatedActivityId) => {
let internalPartitionKey;
if (options.partitionKey) {
internalPartitionKey = (0, import_documents.convertToInternalPartitionKey)(options.partitionKey);
}
const isPartitionLevelFailOverEnabled = this.clientContext.isPartitionLevelFailOverEnabled();
const partitionKeyRangeId = await (0, import_ClientUtils.computePartitionKeyRangeId)(
diagnosticNode,
internalPartitionKey,
this.partitionKeyRangeCache,
isPartitionLevelFailOverEnabled,
this.container
);
const response = await this.clientContext.queryFeed({
path,
resourceType: import_common.ResourceType.item,
resourceId: id,
resultFn: (result) => result ? result.Documents : [],
query,
options: innerOptions,
partitionKey: options.partitionKey,
diagnosticNode,
correlatedActivityId,
partitionKeyRangeId
});
return response;
};
let iterator;
if (this.clientContext.enableEncryption) {
iterator = new import_EncryptionItemQueryIterator.EncryptionItemQueryIterator(
this.clientContext,
query,
options,
fetchFunction,
this.container
);
} else {
iterator = new import_queryIterator.QueryIterator(
this.clientContext,
query,
options,
fetchFunction,
this.container.url,
import_common.ResourceType.item
);
}
return iterator;
}
/**
* Queries all items in an encrypted container.
* @param queryBuilder - Query configuration for the operation. See {@link SqlQuerySpec} for more info on how to build a query on encrypted properties.
* @param options - Used for modifying the request (for instance, specifying the partition key).
* @example Read all items to array.
* ```ts snippet:ItemsQueryEncryptedItems
* import { CosmosClient, EncryptionQueryBuilder } from "@azure/cosmos";
*
* const endpoint = "https://your-account.documents.azure.com";
* const key = "<database account masterkey>";
* const client = new CosmosClient({ endpoint, key });
*
* const { database } = await client.databases.createIfNotExists({ id: "Test Database" });
*
* const { container } = await database.containers.createIfNotExists({ id: "Test Container" });
*
* const queryBuilder = new EncryptionQueryBuilder(
* `SELECT firstname FROM Families f WHERE f.lastName = @lastName`,
* );
* queryBuilder.addParameter("@lastName", "Hendricks", "/lastname");
* const queryIterator = await container.items.getEncryptionQueryIterator(queryBuilder);
* const { resources: items } = await queryIterator.fetchAll();
* ```
*/
async getEncryptionQueryIterator(queryBuilder, options = {}) {
const encryptionSqlQuerySpec = queryBuilder.toEncryptionSqlQuerySpec();
const sqlQuerySpec = await this.buildSqlQuerySpec(encryptionSqlQuerySpec);
if (this.clientContext.enableEncryption && options.partitionKey) {
await this.container.checkAndInitializeEncryption();
const { partitionKeyList, encryptedCount } = await this.container.encryptionProcessor.getEncryptedPartitionKeyValue([
options.partitionKey
]);
if (encryptedCount > 0) {
options.partitionKey = partitionKeyList[0];
}
}
const iterator = this.query(sqlQuerySpec, options);
return iterator;
}
async buildSqlQuerySpec(encryptionSqlQuerySpec) {
let encryptionParameters = encryptionSqlQuerySpec.parameters;
const sqlQuerySpec = {
query: encryptionSqlQuerySpec.query,
parameters: []
};
encryptionParameters = (0, import_common.copyObject)(encryptionParameters);
for (const parameter of encryptionParameters) {
let value;
if (parameter.type !== void 0 || parameter.type !== import_TypeMarker.TypeMarker.Null) {
value = await this.container.encryptionProcessor.encryptQueryParameter(
parameter.path,
parameter.value,
parameter.path === "/id",
parameter.type
);
}
sqlQuerySpec.parameters.push({ name: parameter.name, value });
}
return sqlQuerySpec;
}
readChangeFeed(partitionKeyOrChangeFeedOptions, changeFeedOptions) {
if (isChangeFeedOptions(partitionKeyOrChangeFeedOptions)) {
return this.changeFeed(partitionKeyOrChangeFeedOptions);
} else {
return this.changeFeed(partitionKeyOrChangeFeedOptions, changeFeedOptions);
}
}
changeFeed(partitionKeyOrChangeFeedOptions, changeFeedOptions) {
let partitionKey;
if (!changeFeedOptions && isChangeFeedOptions(partitionKeyOrChangeFeedOptions)) {
partitionKey = void 0;
changeFeedOptions = partitionKeyOrChangeFeedOptions;
} else if (partitionKeyOrChangeFeedOptions !== void 0 && !isChangeFeedOptions(partitionKeyOrChangeFeedOptions)) {
partitionKey = partitionKeyOrChangeFeedOptions;
}
if (!changeFeedOptions) {
changeFeedOptions = {};
}
const path = (0, import_common.getPathFromLink)(this.container.url, import_common.ResourceType.item);
const id = (0, import_common.getIdFromLink)(this.container.url);
return new import_ChangeFeedIterator.ChangeFeedIterator(this.clientContext, id, path, partitionKey, changeFeedOptions);
}
/**
* Returns an iterator to iterate over pages of changes. The iterator returned can be used to fetch changes for a single partition key, feed range or an entire container.
*
* @example
* ```ts snippet:ReadmeSampleChangeFeedPullModelIteratorPartitionKey
* import {
* CosmosClient,
* PartitionKeyDefinitionVersion,
* PartitionKeyKind,
* ChangeFeedStartFrom,
* } from "@azure/cosmos";
*
* const endpoint = "https://your-account.documents.azure.com";
* const key = "<database account masterkey>";
* const client = new CosmosClient({ endpoint, key });
*
* const { database } = await client.databases.createIfNotExists({ id: "Test Database" });
*
* const containerDefinition = {
* id: "Test Database",
* partitionKey: {
* paths: ["/name", "/address/zip"],
* version: PartitionKeyDefinitionVersion.V2,
* kind: PartitionKeyKind.MultiHash,
* },
* };
* const { container } = await database.containers.createIfNotExists(containerDefinition);
*
* const partitionKey = "some-partition-Key-value";
* const options = {
* changeFeedStartFrom: ChangeFeedStartFrom.Beginning(partitionKey),
* };
*
* const iterator = container.items.getChangeFeedIterator(options);
*
* while (iterator.hasMoreResults) {
* const response = await iterator.readNext();
* // process this response
* }
* ```
*/
getChangeFeedIterator(changeFeedIteratorOptions) {
const cfOptions = changeFeedIteratorOptions !== void 0 ? changeFeedIteratorOptions : {};
(0, import_changeFeedUtils.validateChangeFeedIteratorOptions)(cfOptions);
const iterator = new import_ChangeFeedIteratorBuilder.ChangeFeedIteratorBuilder(
cfOptions,
this.clientContext,
this.container,
this.partitionKeyRangeCache
);
return iterator;
}
readAll(options) {
return this.query("SELECT * from c", options);
}
/**
* Create an item.
*
* Any provided type, T, is not necessarily enforced by the SDK.
* You may get more or less properties and it's up to your logic to enforce it.
*
* There is no set schema for JSON items. They may contain any number of custom properties.
*
* @param body - Represents the body of the item. Can contain any number of user defined properties.
* @param options - Used for modifying the request (for instance, specifying the partition key).
* @example Create an item.
* ```ts snippet:ContainerItems
* import { CosmosClient } from "@azure/cosmos";
*
* const endpoint = "https://your-account.documents.azure.com";
* const key = "<database account masterkey>";
* const client = new CosmosClient({ endpoint, key });
*
* const { database } = await client.databases.createIfNotExists({ id: "Test Database" });
*
* const { container } = await database.containers.createIfNotExists({ id: "Test Container" });
*
* const { resource: createdItem } = await container.items.create({
* id: "<item id>",
* properties: {},
* });
* ```
*/
async create(body, options = {}) {
return (0, import_diagnostics.withDiagnostics)(async (diagnosticNode) => {
if ((body.id === void 0 || body.id === "") && !options.disableAutomaticIdGeneration) {
body.id = (0, import_core_util.randomUUID)();
}
const partitionKeyDefinition = await (0, import_ClientUtils.readPartitionKeyDefinition)(
diagnosticNode,
this.container
);
let partitionKey = (0, import_extractPartitionKey.extractPartitionKeys)(body, partitionKeyDefinition);
let response;
try {
if (this.clientContext.enableEncryption) {
await this.container.checkAndInitializeEncryption();
options.containerRid = this.container._rid;
body = (0, import_common.copyObject)(body);
diagnosticNode.beginEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsEncryptOperation
);
const { body: encryptedBody, propertiesEncryptedCount } = await this.container.encryptionProcessor.encrypt(body);
body = encryptedBody;
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsEncryptOperation,
propertiesEncryptedCount
);
partitionKey = (0, import_extractPartitionKey.extractPartitionKeys)(body, partitionKeyDefinition);
}
const err = {};
if (!(0, import_common.isItemResourceValid)(body, err)) {
throw err;
}
const path = (0, import_common.getPathFromLink)(this.container.url, import_common.ResourceType.item);
const id = (0, import_common.getIdFromLink)(this.container.url);
const isPartitionLevelFailOverEnabled = this.clientContext.isPartitionLevelFailOverEnabled();
const partitionKeyRangeId = await (0, import_ClientUtils.computePartitionKeyRangeId)(
diagnosticNode,
partitionKey,
this.partitionKeyRangeCache,
isPartitionLevelFailOverEnabled,
this.container,
partitionKeyDefinition
);
response = await this.clientContext.create({
body,
path,
resourceType: import_common.ResourceType.item,
resourceId: id,
diagnosticNode,
options,
partitionKey,
partitionKeyRangeId
});
} catch (error) {
if (this.clientContext.enableEncryption) {
await this.container.throwIfRequestNeedsARetryPostPolicyRefresh(error);
}
throw error;
}
if (this.clientContext.enableEncryption) {
try {
diagnosticNode.beginEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation
);
const { body: decryptedResult, propertiesDecryptedCount } = await this.container.encryptionProcessor.decrypt(response.result);
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation,
propertiesDecryptedCount
);
response.result = decryptedResult;
partitionKey = (0, import_extractPartitionKey.extractPartitionKeys)(response.result, partitionKeyDefinition);
} catch (error) {
const decryptionError = new import_request.ErrorResponse(
`Item creation was successful but response decryption failed: + ${error.message}`
);
decryptionError.code = import_common.StatusCodes.ServiceUnavailable;
throw decryptionError;
}
}
const ref = new import_Item.Item(
this.container,
response.result.id,
this.clientContext,
partitionKey
);
return new import_ItemResponse.ItemResponse(
response.result,
response.headers,
response.code,
response.substatus,
ref,
(0, import_diagnostics.getEmptyCosmosDiagnostics)()
);
}, this.clientContext);
}
async upsert(body, options = {}) {
return (0, import_diagnostics.withDiagnostics)(async (diagnosticNode) => {
if ((body.id === void 0 || body.id === "") && !options.disableAutomaticIdGeneration) {
body.id = (0, import_core_util.randomUUID)();
}
const partitionKeyDefinition = await (0, import_ClientUtils.readPartitionKeyDefinition)(
diagnosticNode,
this.container
);
let partitionKey = (0, import_extractPartitionKey.extractPartitionKeys)(body, partitionKeyDefinition);
let response;
try {
if (this.clientContext.enableEncryption) {
body = (0, import_common.copyObject)(body);
options = options || {};
await this.container.checkAndInitializeEncryption();
options.containerRid = this.container._rid;
diagnosticNode.beginEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsEncryptOperation
);
const { body: encryptedBody, propertiesEncryptedCount } = await this.container.encryptionProcessor.encrypt(body);
body = encryptedBody;
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsEncryptOperation,
propertiesEncryptedCount
);
partitionKey = (0, import_extractPartitionKey.extractPartitionKeys)(body, partitionKeyDefinition);
}
const err = {};
if (!(0, import_common.isItemResourceValid)(body, err)) {
throw err;
}
const path = (0, import_common.getPathFromLink)(this.container.url, import_common.ResourceType.item);
const id = (0, import_common.getIdFromLink)(this.container.url);
const isPartitionLevelFailOverEnabled = this.clientContext.isPartitionLevelFailOverEnabled();
const partitionKeyRangeId = await (0, import_ClientUtils.computePartitionKeyRangeId)(
diagnosticNode,
partitionKey,
this.partitionKeyRangeCache,
isPartitionLevelFailOverEnabled,
this.container,
partitionKeyDefinition
);
response = await this.clientContext.upsert({
body,
path,
resourceType: import_common.ResourceType.item,
resourceId: id,
options,
partitionKey,
diagnosticNode,
partitionKeyRangeId
});
} catch (error) {
if (this.clientContext.enableEncryption) {
await this.container.throwIfRequestNeedsARetryPostPolicyRefresh(error);
}
throw error;
}
if (this.clientContext.enableEncryption) {
try {
diagnosticNode.beginEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation
);
const { body: decryptedResult, propertiesDecryptedCount } = await this.container.encryptionProcessor.decrypt(response.result);
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation,
propertiesDecryptedCount
);
response.result = decryptedResult;
partitionKey = (0, import_extractPartitionKey.extractPartitionKeys)(response.result, partitionKeyDefinition);
} catch (error) {
const decryptionError = new import_request.ErrorResponse(
`Item upsert was successful but response decryption failed: + ${error.message}`
);
decryptionError.code = import_common.StatusCodes.ServiceUnavailable;
throw decryptionError;
}
}
const ref = new import_Item.Item(
this.container,
response.result.id,
this.clientContext,
partitionKey
);
return new import_ItemResponse.ItemResponse(
response.result,
response.headers,
response.code,
response.substatus,
ref,
(0, import_diagnostics.getEmptyCosmosDiagnostics)()
);
}, this.clientContext);
}
/**
* Execute bulk operations on items.
* @param operations - List of operations
* @param options - used for modifying the request
* @returns list of operation results corresponding to the operations
*
* @example
* ```ts snippet:ItemsExecuteBulkOperations
* import { CosmosClient, OperationInput } from "@azure/cosmos";
*
* const endpoint = "https://your-account.documents.azure.com";
* const key = "<database account masterkey>";
* const client = new CosmosClient({ endpoint, key });
*
* const { database } = await client.databases.createIfNotExists({ id: "Test Database" });
*
* const { container } = await database.containers.createIfNotExists({ id: "Test Container" });
*
* const operations: OperationInput[] = [
* {
* operationType: "Create",
* resourceBody: { id: "doc1", name: "sample", key: "A" },
* },
* {
* operationType: "Upsert",
* partitionKey: "A",
* resourceBody: { id: "doc2", name: "other", key: "A" },
* },
* ];
*
* await container.items.executeBulkOperations(operations);
* ```
*/
async executeBulkOperations(operations, options = {}) {
const bulkHelper = new import_BulkHelper.BulkHelper(
this.container,
this.clientContext,
this.partitionKeyRangeCache,
options
);
return bulkHelper.execute(operations);
}
/**
* Execute bulk operations on items.
* @deprecated Use `executeBulkOperations` instead.
*
* Bulk takes an array of Operations which are typed based on what the operation does.
* The choices are: Create, Upsert, Read, Replace, and Delete
*
* Usage example:
* ```ts snippet:ItemsBulk
* import { CosmosClient, OperationInput } from "@azure/cosmos";
*
* const endpoint = "https://your-account.documents.azure.com";
* const key = "<database account masterkey>";
* const client = new CosmosClient({ endpoint, key });
*
* const { database } = await client.databases.createIfNotExists({ id: "Test Database" });
*
* const { container } = await database.containers.createIfNotExists({ id: "Test Container" });
*
* // partitionKey is optional at the top level if present in the resourceBody
* const operations: OperationInput[] = [
* {
* operationType: "Create",
* resourceBody: { id: "doc1", name: "sample", key: "A" },
* },
* {
* operationType: "Upsert",
* partitionKey: "A",
* resourceBody: { id: "doc2", name: "other", key: "A" },
* },
* ];
*
* await container.items.bulk(operations);
* ```
*
* @param operations - List of operations. Limit 100
* @param bulkOptions - Optional options object to modify bulk behavior. Pass \{ continueOnError: false \} to stop executing operations when one fails. (Defaults to true)
* @param options - Used for modifying the request.
*/
async bulk(operations, bulkOptions, options) {
return (0, import_diagnostics.withDiagnostics)(async (diagnosticNode) => {
// Snapshot the current partition key ranges so operations can be grouped
// into one batch per physical partition.
const partitionKeyRanges = (await this.partitionKeyRangeCache.onCollectionRoutingMap(this.container.url, diagnosticNode)).getOrderedParitionKeyRanges();
const partitionKeyDefinition = await (0, import_ClientUtils.readPartitionKeyDefinition)(
diagnosticNode,
this.container
);
if (this.clientContext.enableEncryption) {
// Operate on a copy so the caller's operation objects are not mutated
// when bodies are encrypted.
operations = (0, import_common.copyObject)(operations);
options = options || {};
await this.container.checkAndInitializeEncryption();
options.containerRid = this.container._rid;
diagnosticNode.beginEncryptionDiagnostics(import_common.Constants.Encryption.DiagnosticsEncryptOperation);
const { operations: encryptedOperations, totalPropertiesEncryptedCount } = await this.bulkBatchEncryptionHelper(operations);
operations = encryptedOperations;
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsEncryptOperation,
totalPropertiesEncryptedCount
);
}
// One empty batch per partition key range; operations are distributed into
// them below based on the hash of each operation's partition key.
const batches = partitionKeyRanges.map((keyRange) => {
return {
min: keyRange.minInclusive,
max: keyRange.maxExclusive,
rangeId: keyRange.id,
indexes: [],
operations: []
};
});
this.groupOperationsBasedOnPartitionKey(operations, partitionKeyDefinition, options, batches);
const path = (0, import_common.getPathFromLink)(this.container.url, import_common.ResourceType.item);
// Results are written back at each operation's original index so response
// order matches input order regardless of how operations were batched.
const orderedResponses = [];
// Drop empty batches and split oversized ones to stay under the request
// body-size limit.
const batchMap = batches.filter((batch) => batch.operations.length).flatMap((batch) => (0, import_batch.splitBatchBasedOnBodySize)(batch));
await Promise.all(
this.executeBatchOperations(
batchMap,
path,
bulkOptions,
options,
diagnosticNode,
orderedResponses,
partitionKeyDefinition
)
);
// The returned value is the (array) response with diagnostics attached as a
// property — callers rely on this array-with-property shape.
const response = orderedResponses;
response.diagnostics = diagnosticNode.toDiagnostic(this.clientContext.getClientConfig());
return response;
}, this.clientContext);
}
executeBatchOperations(batchMap, path, bulkOptions, options, diagnosticNode, orderedResponses, partitionKeyDefinition) {
return batchMap.map(async (batch) => {
if (batch.operations.length > 100) {
throw new Error("Cannot run bulk request with more than 100 operations per partition");
}
let response;
try {
response = await (0, import_diagnostics.addDiagnosticChild)(
async (childNode) => this.clientContext.bulk({
body: batch.operations,
partitionKeyRangeId: batch.rangeId,
path,
resourceId: this.container.url,
bulkOptions,
options,
diagnosticNode: childNode
}),
diagnosticNode,
import_DiagnosticNodeInternal.DiagnosticNodeType.BATCH_REQUEST
);
response.result.forEach((operationResponse, index) => {
orderedResponses[batch.indexes[index]] = operationResponse;
});
} catch (err) {
if (this.clientContext.enableEncryption) {
await this.container.throwIfRequestNeedsARetryPostPolicyRefresh(err);
}
if (err.code === import_common.StatusCodes.Gone) {
const isPartitionSplit = err.substatus === import_common.SubStatusCodes.PartitionKeyRangeGone || err.substatus === import_common.SubStatusCodes.CompletingSplit;
if (isPartitionSplit) {
const queryRange = new import_routing.QueryRange(batch.min, batch.max, true, false);
const overlappingRanges = await this.partitionKeyRangeCache.getOverlappingRanges(
this.container.url,
queryRange,
diagnosticNode,
true
);
if (overlappingRanges.length < 1) {
throw new Error("Partition split/merge detected but no overlapping ranges found.");
}
if (overlappingRanges.length >= 1) {
const newBatches = this.createNewBatches(
overlappingRanges,
batch,
partitionKeyDefinition
);
await Promise.all(
this.executeBatchOperations(
newBatches,
path,
bulkOptions,
options,
diagnosticNode,
orderedResponses,
partitionKeyDefinition
)
);
}
} else {
throw new Error(
"Partition key error. An operation has an unsupported partitionKey type" + err.message
);
}
} else {
throw new Error(`Bulk request errored with: ${err.message}`);
}
}
if (response) {
try {
if (this.clientContext.enableEncryption) {
diagnosticNode.beginEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation
);
let count = 0;
for (const result of response.result) {
if (result.resourceBody) {
const { body, propertiesDecryptedCount } = await this.container.encryptionProcessor.decrypt(result.resourceBody);
result.resourceBody = body;
count += propertiesDecryptedCount;
}
}
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation,
count
);
}
} catch (error) {
const decryptionError = new import_request.ErrorResponse(
`Batch response was received but response decryption failed: + ${error.message}`
);
decryptionError.code = import_common.StatusCodes.ServiceUnavailable;
throw decryptionError;
}
response.result.forEach((operationResponse, index) => {
orderedResponses[batch.indexes[index]] = operationResponse;
});
}
});
}
/**
* Function to create new batches based of partition key Ranges.
*
* @param overlappingRanges - Overlapping partition key ranges.
* @param batch - Batch to be split.
* @param partitionKeyDefinition - PartitionKey definition of container.
* @returns Array of new batches.
*/
createNewBatches(overlappingRanges, batch, partitionKeyDefinition) {
const newBatches = overlappingRanges.map((keyRange) => {
return {
min: keyRange.minInclusive,
max: keyRange.maxExclusive,
rangeId: keyRange.id,
indexes: [],
operations: []
};
});
let indexValue = 0;
batch.operations.forEach((operation) => {
const partitionKey = JSON.parse(operation.partitionKey);
const hashed = (0, import_hash.hashPartitionKey)(
(0, import_typeChecks.assertNotUndefined)(
partitionKey,
"undefined value for PartitionKey is not expected during grouping of bulk operations."
),
partitionKeyDefinition
);
const batchForKey = (0, import_typeChecks.assertNotUndefined)(
newBatches.find((newBatch) => {
return (0, import_batch.isKeyInRange)(newBatch.min, newBatch.max, hashed);
}),
"No suitable Batch found."
);
batchForKey.operations.push(operation);
batchForKey.indexes.push(batch.indexes[indexValue]);
indexValue++;
});
return newBatches;
}
/**
* Function to create batches based of partition key Ranges.
* @param operations - operations to group
* @param partitionDefinition - PartitionKey definition of container.
* @param options - Request options for bulk request.
* @param batches - Groups to be filled with operations.
*/
groupOperationsBasedOnPartitionKey(operations, partitionDefinition, options, batches) {
operations.forEach((operationInput, index) => {
const { operation, partitionKey } = (0, import_batch.prepareOperations)(
operationInput,
partitionDefinition,
options
);
const hashed = (0, import_hash.hashPartitionKey)(
(0, import_typeChecks.assertNotUndefined)(
partitionKey,
"undefined value for PartitionKey is not expected during grouping of bulk operations."
),
partitionDefinition
);
const batchForKey = (0, import_typeChecks.assertNotUndefined)(
batches.find((batch) => {
return (0, import_batch.isKeyInRange)(batch.min, batch.max, hashed);
}),
"No suitable Batch found."
);
batchForKey.operations.push(operation);
batchForKey.indexes.push(index);
});
}
/**
* Execute transactional batch operations on items.
*
* Batch takes an array of Operations which are typed based on what the operation does. Batch is transactional and will rollback all operations if one fails.
* The choices are: Create, Upsert, Read, Replace, and Delete
*
* Usage example:
* ```ts snippet:ItemsBatch
* import { CosmosClient, OperationInput } from "@azure/cosmos";
*
* const endpoint = "https://your-account.documents.azure.com";
* const key = "<database account masterkey>";
* const client = new CosmosClient({ endpoint, key });
*
* const { database } = await client.databases.createIfNotExists({ id: "Test Database" });
*
* const { container } = await database.containers.createIfNotExists({ id: "Test Container" });
*
* // The partitionKey is a required second argument. If it’s undefined, it defaults to the expected partition key format.
* const operations: OperationInput[] = [
* {
* operationType: "Create",
* resourceBody: { id: "doc1", name: "sample", key: "A" },
* },
* {
* operationType: "Upsert",
* resourceBody: { id: "doc2", name: "other", key: "A" },
* },
* ];
*
* await container.items.batch(operations, "A");
* ```
*
* @param operations - List of operations. Limit 100
* @param options - Used for modifying the request
*/
async batch(operations, partitionKey, options) {
return (0, import_diagnostics.withDiagnostics)(async (diagnosticNode) => {
operations.map((operation) => (0, import_batch.decorateBatchOperation)(operation, options));
partitionKey = await (0, import_extractPartitionKey.setPartitionKeyIfUndefined)(diagnosticNode, this.container, partitionKey);
const path = (0, import_common.getPathFromLink)(this.container.url, import_common.ResourceType.item);
if (operations.length > 100) {
throw new Error("Cannot run batch request with more than 100 operations per partition");
}
for (const operationInput of operations) {
if (operationInput && operationInput.partitionKey !== void 0) {
operationInput.partitionKey = JSON.stringify(
(0, import_documents.convertToInternalPartitionKey)(operationInput.partitionKey)
);
}
}
let response;
try {
if (this.clientContext.enableEncryption) {
operations = (0, import_common.copyObject)(operations);
options = options || {};
await this.container.checkAndInitializeEncryption();
options.containerRid = this.container._rid;
let count = 0;
diagnosticNode.beginEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsEncryptOperation
);
if (partitionKey) {
const partitionKeyInternal = (0, import_documents.convertToInternalPartitionKey)(partitionKey);
const { partitionKeyList, encryptedCount } = await this.container.encryptionProcessor.getEncryptedPartitionKeyValue(
partitionKeyInternal
);
partitionKey = partitionKeyList;
count += encryptedCount;
}
const { operations: encryptedOperations, totalPropertiesEncryptedCount } = await this.bulkBatchEncryptionHelper(operations);
operations = encryptedOperations;
count += totalPropertiesEncryptedCount;
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsEncryptOperation,
count
);
}
const isPartitionLevelFailOverEnabled = this.clientContext.isPartitionLevelFailOverEnabled();
const partitionKeyRangeId = await (0, import_ClientUtils.computePartitionKeyRangeId)(
diagnosticNode,
partitionKey,
this.partitionKeyRangeCache,
isPartitionLevelFailOverEnabled,
this.container
);
response = await this.clientContext.batch({
body: operations,
partitionKey,
path,
resourceId: this.container.url,
options,
diagnosticNode,
partitionKeyRangeId
});
} catch (err) {
if (this.clientContext.enableEncryption) {
await this.container.throwIfRequestNeedsARetryPostPolicyRefresh(err);
}
throw new Error(`Batch request error: ${err.message}`);
}
if (this.clientContext.enableEncryption) {
try {
diagnosticNode.beginEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation
);
let count = 0;
for (const result of response.result) {
if (result.resourceBody) {
const { body, propertiesDecryptedCount } = await this.container.encryptionProcessor.decrypt(result.resourceBody);
result.resourceBody = body;
count += propertiesDecryptedCount;
}
}
diagnosticNode.endEncryptionDiagnostics(
import_common.Constants.Encryption.DiagnosticsDecryptOperation,
count
);
} catch (error) {
const decryptionError = new import_request.ErrorResponse(
`Batch response was received but response decryption failed: + ${error.message}`
);
decryptionError.code = import_common.StatusCodes.ServiceUnavailable;
throw decryptionError;
}
}
return response;
}, this.clientContext);
}
async bulkBatchEncryptionHelper(operations) {
let totalPropertiesEncryptedCount = 0;
const encryptedOperations = [];
for (const operation of operations) {
const { operation: encryptedOp, totalPropertiesEncryptedCount: updatedCount } = await (0, import_batch.encryptOperationInput)(
this.container.encryptionProcessor,
operation,
totalPropertiesEncryptedCount
);
totalPropertiesEncryptedCount = updatedCount;
encryptedOperations.push(encryptedOp);
}
return { operations: encryptedOperations, totalPropertiesEncryptedCount };
}
}
// Annotate the CommonJS export names for ESM import in node:
// Dead code by design — the `0 &&` guard means it never executes, but Node's
// static analyzer (cjs-module-lexer) parses it to expose `Items` as a named
// export when this CJS module is imported from ESM.
0 && (module.exports = {
Items
});