/*
 * @azure/cosmos
 * Microsoft Azure Cosmos DB Service Node.js SDK for NOSQL API
 * (bundled/transpiled CommonJS output — 171 lines, 6.29 kB)
 */
// Cached Object intrinsics used by the esbuild-generated module-interop
// helpers below (standard esbuild CommonJS preamble).
const {
  create: __create,
  defineProperty: __defProp,
  getOwnPropertyDescriptor: __getOwnPropDesc,
  getOwnPropertyNames: __getOwnPropNames,
  getPrototypeOf: __getProtoOf,
} = Object;
const __hasOwnProp = Object.prototype.hasOwnProperty;
// Installs each entry of `all` on `target` as an enumerable lazy getter;
// esbuild emits this helper to register a module's named exports.
var __export = (target, all) => {
  for (const exportName in all) {
    __defProp(target, exportName, { get: all[exportName], enumerable: true });
  }
};
// Copies every own property name of `from` onto `to` as a live getter,
// skipping keys already present on `to` and the single `except` key; the
// `desc` parameter is scratch space for the source property descriptor.
// Returns `to`. (esbuild module-interop helper.)
var __copyProps = (to, from, except, desc) => {
  const copyable = from && (typeof from === "object" || typeof from === "function");
  if (copyable) {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) continue;
      desc = __getOwnPropDesc(from, key);
      __defProp(to, key, {
        get: () => from[key],
        enumerable: !desc || desc.enumerable,
      });
    }
  }
  return to;
};
// Wraps a required CommonJS module so it can be consumed like an ES module.
// A synthetic enumerable "default" pointing at `module.exports` is added
// only when the importer runs in node-compat mode or the module is plain
// CommonJS (no "__esModule" marker set by a Babel-compatible transform).
var __toESM = (mod, isNodeMode, target) => {
  target = mod != null ? __create(__getProtoOf(mod)) : {};
  const needsSyntheticDefault = isNodeMode || !mod || !mod.__esModule;
  if (needsSyntheticDefault) {
    __defProp(target, "default", { value: mod, enumerable: true });
  }
  return __copyProps(target, mod);
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// Registry object for this module's public exports; __export installs each
// export as a lazy, enumerable getter on it (the getter defers evaluation of
// the HelperPerPartition binding until first access).
var HelperPerPartition_exports = {};
__export(HelperPerPartition_exports, {
  HelperPerPartition: () => HelperPerPartition
});
// Publish module.exports before the require calls below — esbuild emits this
// order so that circular requires can observe a usable module object.
module.exports = __toCommonJS(HelperPerPartition_exports);
// Third-party semaphore package (callback-style take/leave lock), wrapped for
// ESM-style default access; the rest are project-local modules.
var import_semaphore = __toESM(require("semaphore"));
var import_statusCodes = require("../common/statusCodes.js");
var import_Batcher = require("./Batcher.js");
var import_CongestionAlgorithm = require("./CongestionAlgorithm.js");
var import_PartitionMetric = require("./PartitionMetric.js");
var import_Limiter = require("./Limiter.js");
/**
 * Manages bulk-operation batching for a single physical partition:
 * operations accumulate into the current Batcher, full batches are pushed
 * to a concurrency-limited dispatch queue, and dispatch concurrency is
 * tuned over time by a congestion-control algorithm.
 */
class HelperPerPartition {
  executor;
  retrier;
  // Batcher currently accepting operations; swapped out once it fills up.
  currentBatcher;
  // Binary semaphore serializing all reads/writes of `currentBatcher`.
  lock;
  partitionMetric;
  oldPartitionMetric;
  diagnosticLevel;
  encryptionEnabled;
  encryptionProcessor;
  clientConfigDiagnostics;
  congestionControlAlgorithm;
  dispatchLimiterQueue;
  // Starting dispatch concurrency; adjusted by the congestion algorithm.
  initialConcurrency = 1;
  // Shared mutable counter of operations that reached a terminal state.
  processedOperationCountRef;
  constructor(executor, retrier, refreshpartitionKeyRangeCache, diagnosticLevel, encryptionEnabled, clientConfig, encryptionProcessor, processedOperationCountRef) {
    this.executor = executor;
    this.retrier = retrier;
    this.diagnosticLevel = diagnosticLevel;
    this.encryptionEnabled = encryptionEnabled;
    this.encryptionProcessor = encryptionProcessor;
    this.clientConfigDiagnostics = clientConfig;
    this.oldPartitionMetric = new import_PartitionMetric.PartitionMetric();
    this.partitionMetric = new import_PartitionMetric.PartitionMetric();
    this.processedOperationCountRef = processedOperationCountRef;
    this.lock = (0, import_semaphore.default)(1);
    this.dispatchLimiterQueue = new import_Limiter.LimiterQueue(
      this.initialConcurrency,
      this.partitionMetric,
      this.retrier,
      refreshpartitionKeyRangeCache
    );
    this.congestionControlAlgorithm = new import_CongestionAlgorithm.CongestionAlgorithm(
      this.dispatchLimiterQueue,
      this.partitionMetric,
      this.oldPartitionMetric
    );
    this.currentBatcher = this.createBatcher();
  }
  /**
   * Enqueues an operation into the current batch.
   * If the operation does not fit because the batch is full, the full batch is enqueued in the dispatch queue
   * and a new batch is created. The promise resolves when the operation has been successfully added,
   * and rejects (after failing the operation's context) when it can never be batched.
   */
  async add(operation) {
    return new Promise((resolve, reject) => {
      this.lock.take(() => {
        try {
          while (!this.currentBatcher.tryAdd(operation)) {
            const fullBatch = this.getBatchToQueueAndCreate();
            if (fullBatch) {
              this.dispatchLimiterQueue.push(fullBatch);
            } else {
              // BUGFIX: getBatchToQueueAndCreate() returns null when the
              // current batcher is already empty — without creating a new
              // one. If an *empty* batcher still rejects the operation
              // (e.g. it exceeds the batch limits), retrying the identical
              // tryAdd can never succeed, so the original loop would spin
              // forever while holding the lock. Fail fast instead; the
              // catch block below reports it through the normal error path.
              throw new Error(
                "Operation could not be added to an empty batch; it likely exceeds the maximum batch size limits."
              );
            }
          }
          resolve();
        } catch (err) {
          // Surface any batching failure as an InternalServerError on the
          // operation's context so callers awaiting the operation observe a
          // terminal result with diagnostics attached.
          const response = {
            operationInput: operation.unencryptedOperationInput,
            error: Object.assign(new Error(err.message), {
              code: import_statusCodes.StatusCodes.InternalServerError,
              diagnostics: operation.operationContext.diagnosticNode.toDiagnostic(
                this.clientConfigDiagnostics
              )
            })
          };
          operation.operationContext.fail(response);
          this.processedOperationCountRef.count++;
          reject(err);
        } finally {
          // Always release the semaphore, even on failure, so other callers
          // queued on add()/addPartialBatchToQueue() can proceed.
          this.lock.leave();
        }
      });
    });
  }
  /**
   * @returns the batch to be dispatched and creates a new one,
   * or null when the current batch is empty (nothing to dispatch).
   */
  getBatchToQueueAndCreate() {
    if (this.currentBatcher.isEmpty()) return null;
    const previousBatcher = this.currentBatcher;
    this.currentBatcher = this.createBatcher();
    return previousBatcher;
  }
  /**
   * In case there are leftover operations that did not fill a full batch,
   * addPartialBatchToQueue will add those operations as a batch in the dispatch queue.
   */
  addPartialBatchToQueue() {
    this.lock.take(() => {
      try {
        if (!this.currentBatcher.isEmpty()) {
          const batch = this.currentBatcher;
          this.currentBatcher = this.createBatcher();
          this.dispatchLimiterQueue.push(batch);
        }
      } finally {
        this.lock.leave();
      }
    });
  }
  // Builds a fresh Batcher wired to this partition's dispatch queue,
  // executor/retrier, and diagnostics/encryption settings.
  createBatcher() {
    return new import_Batcher.Batcher(
      this.dispatchLimiterQueue,
      this.executor,
      this.retrier,
      this.diagnosticLevel,
      this.encryptionEnabled,
      this.clientConfigDiagnostics,
      this.encryptionProcessor,
      this.processedOperationCountRef
    );
  }
  /**
   * Runs congestion algo for a partition.
   * Controlled by a single timer for all the partitions.
   */
  runCongestionAlgorithm() {
    this.congestionControlAlgorithm.run();
  }
  /**
   * Empties the dispatch queue and clears the current batch.
   * This is used in case of stale container Rid detected for encryption operations
   */
  async dispose() {
    await this.dispatchLimiterQueue.pauseAndClear(null);
    this.currentBatcher = void 0;
  }
}
// Annotate the CommonJS export names for ESM import in node:
// the `0 &&` makes this dead code at runtime, but Node's cjs-module-lexer
// statically detects the assignment pattern so named ESM imports
// (`import { HelperPerPartition } from ...`) resolve. Must stay verbatim.
0 && (module.exports = {
  HelperPerPartition
});