UNPKG

@azure/cosmos

Version:
852 lines (851 loc) • 34.2 kB
// --- esbuild-generated CommonJS/ESM interop helpers (standard bundler preamble) ---
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines lazy, enumerable getters on `target` for every key of `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except`
// and keys already present, preserving each property's enumerability.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps a CommonJS module so it can be consumed like an ES module.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
  // If the importer is in node compatibility mode or this is not an ESM
  // file that has been converted to a CommonJS file using a Babel-
  // compatible transform (i.e. "__esModule" has not been set), then set
  // "default" to the CommonJS "module.exports" for node compatibility.
  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
  mod
));
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// Public exports of this module.
var parallelQueryExecutionContextBase_exports = {};
__export(parallelQueryExecutionContextBase_exports, {
  ParallelQueryExecutionContextBase: () => ParallelQueryExecutionContextBase,
  ParallelQueryExecutionContextBaseStates: () => ParallelQueryExecutionContextBaseStates
});
module.exports = __toCommonJS(parallelQueryExecutionContextBase_exports);

// Third-party and project-local dependencies.
var import_priorityqueuejs = __toESM(require("priorityqueuejs"));
var import_semaphore = __toESM(require("semaphore"));
var import_statusCodes = require("../common/statusCodes.js");
var import_ErrorResponse = require("../request/ErrorResponse.js");
var import_QueryRange = require("../routing/QueryRange.js");
var import_smartRoutingMapProvider = require("../routing/smartRoutingMapProvider.js");
var import_documentProducer = require("./documentProducer.js");
var import_headerUtils = require("./headerUtils.js");
var import_RidSkipCountFilter = require("./queryFilteringStrategy/RidSkipCountFilter.js");
var import_DiagnosticNodeInternal = require("../diagnostics/DiagnosticNodeInternal.js");
var import_parallelQueryResult = require("./parallelQueryResult.js");

// Lifecycle states of the execution context (TypeScript string-enum output):
// started -> inProgress -> ended.
var ParallelQueryExecutionContextBaseStates = /* @__PURE__ */ ((ParallelQueryExecutionContextBaseStates2) => {
  ParallelQueryExecutionContextBaseStates2["started"] = "started";
  ParallelQueryExecutionContextBaseStates2["inProgress"] = "inProgress";
  ParallelQueryExecutionContextBaseStates2["ended"] = "ended";
  return ParallelQueryExecutionContextBaseStates2;
})(ParallelQueryExecutionContextBaseStates || {});

class ParallelQueryExecutionContextBase {
  /**
   * Provides the ParallelQueryExecutionContextBase.
   * This is the base class that ParallelQueryExecutionContext and
   * OrderByQueryExecutionContext will derive from.
   *
   * When handling a parallelized query, it instantiates one instance of
   * DocumentProducer per target partition key range and aggregates the
   * result of each.
   *
   * @param clientContext - The service endpoint to use to create the client.
   * @param collectionLink - The Collection Link
   * @param query - The query (string or SqlQuerySpec) being executed.
   * @param options - Represents the feed options.
   * @param partitionedQueryExecutionInfo - PartitionedQueryExecutionInfo
   * @param correlatedActivityId - Activity id used to correlate per-partition requests.
   * @param rangeManager - Filters target partition ranges against continuation-token ranges.
   * @param queryProcessingStrategy - Query-type-specific (parallel vs ORDER BY) token/filter logic.
   * @param documentProducerComparator - Ordering used for the buffered-producers priority queue.
   * @hidden
   */
  constructor(clientContext, collectionLink, query, options, partitionedQueryExecutionInfo, correlatedActivityId, rangeManager, queryProcessingStrategy, documentProducerComparator) {
    this.clientContext = clientContext;
    this.collectionLink = collectionLink;
    this.query = query;
    this.options = options;
    this.partitionedQueryExecutionInfo = partitionedQueryExecutionInfo;
    this.correlatedActivityId = correlatedActivityId;
    this.rangeManager = rangeManager;
    this.queryProcessingStrategy = queryProcessingStrategy;
    this.documentProducerComparator = documentProducerComparator;
    // NOTE(review): the six assignments below duplicate the ones above
    // (likely TS parameter-property emit plus explicit assignments); harmless but redundant.
    this.clientContext = clientContext;
    this.collectionLink = collectionLink;
    this.query = query;
    this.options = options;
    this.partitionedQueryExecutionInfo = partitionedQueryExecutionInfo;
    this.correlatedActivityId = correlatedActivityId;
    // Stateful diagnostics node shared across all fetches of this context.
    this.diagnosticNodeWrapper = {
      consumed: false,
      diagnosticNode: new import_DiagnosticNodeInternal.DiagnosticNodeInternal(
        clientContext.diagnosticLevel,
        import_DiagnosticNodeInternal.DiagnosticNodeType.PARALLEL_QUERY_NODE,
        null
      )
    };
    this.diagnosticNodeWrapper.diagnosticNode.addData({ stateful: true });
    this.err = void 0;
    this.state = ParallelQueryExecutionContextBase.STATES.started;
    this.routingProvider = new import_smartRoutingMapProvider.SmartRoutingMapProvider(this.clientContext);
    this.buffer = [];
    // Accept either `continuationToken` or the legacy `continuation` option.
    this.requestContinuation = options ? options.continuationToken || options.continuation : void 0;
    if (this.requestContinuation && !this.options.enableQueryControl) {
      throw new Error(
        "Continuation tokens are supported when enableQueryControl is set true in FeedOptions"
      );
    }
    this.respHeaders = (0, import_headerUtils.getInitialHeader)();
    // Producers not yet buffered, ordered left-to-right by partition range.
    this.unfilledDocumentProducersQueue = new import_priorityqueuejs.default(
      (a, b) => this.compareDocumentProducersByRange(a, b)
    );
    // Producers with buffered items, ordered by the subclass-supplied comparator
    // (arguments swapped to invert priorityqueuejs's max-heap ordering).
    this.bufferedDocumentProducersQueue = new import_priorityqueuejs.default(
      (a, b) => this.documentProducerComparator(b, a)
    );
    // Binary semaphore serializing all access to queues/buffer/state.
    this.sem = (0, import_semaphore.default)(1);
    // Producer initialization is async; holders of the semaphore queue behind it.
    this.sem.take(() => this._initializeDocumentProducers());
  }
  // Last error encountered; once set, all subsequent calls reject with it.
  err;
  // Current lifecycle state (see ParallelQueryExecutionContextBaseStates).
  state;
  static STATES = ParallelQueryExecutionContextBaseStates;
  routingProvider;
  requestContinuation;
  respHeaders;
  unfilledDocumentProducersQueue;
  bufferedDocumentProducersQueue;
  // TODO: update type of buffer from any --> generic can be used here
  buffer;
  // Per-drain map of partition patches (itemCount/range/token), keyed by patch counter.
  partitionDataPatchMap = /* @__PURE__ */ new Map();
  patchCounter = 0;
  // Ranges whose continuation tokens changed due to splits/merges, keyed by "min-max".
  updatedContinuationRanges = /* @__PURE__ */ new Map();
  sem;
  diagnosticNodeWrapper;
  /**
   * Determine if there are still remaining resources to process based on the value of the continuation
   * token or the elements remaining on the current batch in the QueryIterator.
   * @returns true if there are other elements to process in the ParallelQueryExecutionContextBase.
   */
  hasMoreResults() {
    return !this.err && (this.buffer.length > 0 || this.state !== ParallelQueryExecutionContextBase.STATES.ended);
  }
  /**
   * Fetches more results from the query execution context.
   * Pipeline: buffer producers in parallel -> drain buffered producers into
   * the item buffer -> return and reset the buffer.
   * @param diagnosticNode - Optional diagnostic node for tracing.
   * @returns A promise that resolves to the fetched results.
   * @hidden
   */
  async fetchMore(diagnosticNode) {
    await this.bufferDocumentProducers(diagnosticNode);
    await this.fillBufferFromBufferQueue();
    return this.drainBufferedItems();
  }
  /**
   * Processes buffered document producers.
   * NOTE(review): `shouldProcessBufferedProducers` and `fetchFromProducer` are
   * not defined in this file — presumably abstract members supplied by
   * subclasses; verify against ParallelQueryExecutionContext/OrderBy variants.
   * @returns A promise that resolves when processing is complete.
   */
  async processBufferedDocumentProducers() {
    while (this.hasBufferedProducers() && this.shouldProcessBufferedProducers(this.isUnfilledQueueEmpty())) {
      const producer = this.getNextBufferedProducer();
      if (!producer) break;
      await this.processDocumentProducer(producer);
    }
  }
  /**
   * Processes a single document producer using template method pattern.
   * Common structure with query-specific processing delegated to subclasses.
   */
  async processDocumentProducer(producer) {
    const response = await this.fetchFromProducer(producer);
    this._mergeWithActiveResponseHeaders(response.headers);
    if (response.result) {
      this.addToBuffer(response.result);
      this.handlePartitionMapping(producer, response.result);
    }
    // NOTE(review): "peakNextItem" is the DocumentProducer API's spelling
    // (likely a typo for "peek"); kept as-is since it is an external name.
    if (producer.peakNextItem() !== void 0) {
      // Producer still has buffered items: keep it in the buffered queue.
      this.requeueProducer(producer);
    } else if (producer.hasMoreResults()) {
      // Exhausted its buffer but can fetch more: back to the unfilled queue.
      this.moveToUnfilledQueue(producer);
    }
  }
  /**
   * Handles partition mapping updates - implemented in base class using template method pattern.
   * Child classes provide query-specific parameters through abstract methods.
   */
  handlePartitionMapping(producer, result) {
    const itemCount = result?.length || 0;
    const continuationToken = this.getContinuationToken(producer);
    const mapping = {
      itemCount,
      partitionKeyRange: producer.targetPartitionKeyRange,
      continuationToken
    };
    this.updatePartitionMapping(mapping);
  }
  /**
   * Gets the continuation token to record for a producer: if the producer still
   * has buffered (undelivered) items, the previous token is used so a resumed
   * query does not skip them; otherwise the current token.
   */
  getContinuationToken(producer) {
    const hasMoreBufferedItems = producer.peakNextItem() !== void 0;
    return hasMoreBufferedItems ? producer.previousContinuationToken : producer.continuationToken;
  }
  /**
   * Updates partition mapping - creates new entry or merges with existing for ORDER BY queries.
   * Consecutive results from the same partition are folded into one patch entry.
   */
  updatePartitionMapping(mapping) {
    const currentPatch = this.partitionDataPatchMap.get(this.patchCounter.toString());
    const isSamePartition = currentPatch?.partitionKeyRange?.id === mapping.partitionKeyRange.id;
    if (isSamePartition && currentPatch) {
      currentPatch.itemCount += mapping.itemCount;
      currentPatch.continuationToken = mapping.continuationToken;
      return;
    }
    this.partitionDataPatchMap.set((++this.patchCounter).toString(), mapping);
  }
  /**
   * Checks if the unfilled queue is empty (used by ORDER BY for processing control).
   */
  isUnfilledQueueEmpty() {
    return this.unfilledDocumentProducersQueue.size() === 0;
  }
  /**
   * Initializes document producers and fills the priority queue.
   * Handles both continuation token and fresh query scenarios.
   * Runs while holding the semaphore taken in the constructor; errors are
   * recorded in `this.err` rather than thrown (surfaced on the next call).
   */
  async _initializeDocumentProducers() {
    try {
      const targetPartitionRanges = await this._onTargetPartitionRanges();
      const documentProducers = this.requestContinuation ? await this._createDocumentProducersFromContinuation(targetPartitionRanges) : this._createDocumentProducersFromFresh(targetPartitionRanges);
      this._enqueueDocumentProducers(documentProducers);
      this.sem.leave();
    } catch (err) {
      this.err = err;
      this.sem.leave();
    }
  }
  /**
   * Creates document producers from continuation token scenario.
   */
  async _createDocumentProducersFromContinuation(targetPartitionRanges) {
    const parsedToken = this._parseContinuationToken(this.requestContinuation);
    // Reconcile token ranges against current topology (splits/merges).
    const continuationRanges = await this._handlePartitionRangeChanges(parsedToken);
    const additionalQueryInfo = this.queryProcessingStrategy.createAdditionalQueryInfo(parsedToken);
    const filterResult = this.rangeManager.filterPartitionRanges(
      targetPartitionRanges,
      continuationRanges,
      additionalQueryInfo
    );
    const rangeTokenPairs = filterResult.rangeTokenPairs;
    const filterContext = this.queryProcessingStrategy.createFilterContext(parsedToken);
    return rangeTokenPairs.map(
      (rangeTokenPair) => this._createDocumentProducerFromRangeTokenPair(
        rangeTokenPair,
        continuationRanges,
        filterContext
      )
    );
  }
  /**
   * Creates document producers from fresh query scenario (no continuation token).
   */
  _createDocumentProducersFromFresh(targetPartitionRanges) {
    return targetPartitionRanges.map(
      (partitionTargetRange) => this._createTargetPartitionQueryExecutionContext(partitionTargetRange, void 0)
    );
  }
  /**
   * Creates a document producer from a range token pair (continuation token scenario).
   */
  _createDocumentProducerFromRangeTokenPair(rangeTokenPair, continuationRanges, filterContext) {
    const partitionTargetRange = rangeTokenPair.range;
    const continuationToken = rangeTokenPair.continuationToken;
    const filterCondition = rangeTokenPair.filteringCondition || void 0;
    const matchingContinuationRange = continuationRanges.find(
      (cr) => cr.range.id === partitionTargetRange.id
    );
    const startEpk = matchingContinuationRange?.epkMin;
    const endEpk = matchingContinuationRange?.epkMax;
    // NOTE(review): targetPartitionId is taken from the LAST continuation range;
    // presumably the resume point for RID-skip filtering — confirm with
    // queryProcessingStrategy.getPartitionFilterContext semantics.
    const targetPartitionId = continuationRanges.length > 0 && continuationRanges[continuationRanges.length - 1].range ? continuationRanges[continuationRanges.length - 1].range.id : void 0;
    const partitionFilterContext = this.queryProcessingStrategy.getPartitionFilterContext(
      filterContext,
      targetPartitionId,
      partitionTargetRange.id
    );
    return this._createTargetPartitionQueryExecutionContext(
      partitionTargetRange,
      continuationToken,
      startEpk,
      endEpk,
      !!(startEpk && endEpk),
      // populateEpkRangeHeaders - true if both EPK values are present
      filterCondition,
      partitionFilterContext
    );
  }
  /**
   * Enqueues document producers into the unfilled queue.
   * Enqueue failures are recorded in `this.err` rather than thrown.
   */
  _enqueueDocumentProducers(documentProducers) {
    documentProducers.forEach((documentProducer) => {
      try {
        this.unfilledDocumentProducersQueue.enq(documentProducer);
      } catch (e) {
        this.err = e;
      }
    });
  }
  /**
   * Checks if there are buffered document producers ready for processing.
   * Encapsulates queue size checking.
   */
  hasBufferedProducers() {
    return this.bufferedDocumentProducersQueue.size() > 0;
  }
  /**
   * Gets the next buffered document producer for processing, or undefined
   * when the queue is empty. Encapsulates queue dequeuing logic.
   */
  getNextBufferedProducer() {
    if (this.bufferedDocumentProducersQueue.size() > 0) {
      return this.bufferedDocumentProducersQueue.deq();
    }
    return void 0;
  }
  /**
   * Adds items to the result buffer. Handles both single items and arrays.
   * Falsy single items are silently dropped.
   */
  addToBuffer(items) {
    if (Array.isArray(items)) {
      if (items.length > 0) {
        this.buffer.push(...items);
      }
    } else if (items) {
      this.buffer.push(items);
    }
  }
  /**
   * Moves a producer to the unfilled queue for later processing.
   */
  moveToUnfilledQueue(producer) {
    this.unfilledDocumentProducersQueue.enq(producer);
  }
  /**
   * Re-queues a producer to the buffered queue for further processing.
   */
  requeueProducer(producer) {
    this.bufferedDocumentProducersQueue.enq(producer);
  }
  /**
   * Compares two document producers based on their partition key ranges and EPK values.
   * Primary comparison: minInclusive values for left-to-right range traversal
   * Secondary comparison: EPK ranges when minInclusive values are identical
   * (arguments are reversed because priorityqueuejs pops the max element).
   * @param a - First document producer
   * @param b - Second document producer
   * @returns Comparison result for priority queue ordering
   * @hidden
   */
  compareDocumentProducersByRange(a, b) {
    const aMinInclusive = a.targetPartitionKeyRange.minInclusive;
    const bMinInclusive = b.targetPartitionKeyRange.minInclusive;
    const minInclusiveComparison = bMinInclusive.localeCompare(aMinInclusive);
    if (minInclusiveComparison === 0) {
      const aMinEpk = a.startEpk;
      const bMinEpk = b.startEpk;
      if (aMinEpk && bMinEpk) {
        return bMinEpk.localeCompare(aMinEpk);
      }
    }
    return minInclusiveComparison;
  }
  /**
   * Detects partition splits/merges by analyzing parsed continuation token ranges and comparing with current topology
   * @param parsed - The continuation token containing range mappings to analyze
   * @returns Array of processed ranges with EPK info
   */
  async _handlePartitionRangeChanges(parsed) {
    const processedRanges = [];
    const rangeMappings = parsed.rangeMappings;
    if (!rangeMappings || rangeMappings.length === 0) {
      return [];
    }
    for (const rangeWithToken of rangeMappings) {
      const range = rangeWithToken.queryRange;
      const queryRange = new import_QueryRange.QueryRange(
        range.min,
        range.max,
        true,
        // isMinInclusive - assumption: always true
        false
        // isMaxInclusive - assumption: always false (max is exclusive)
      );
      const rangeMin = queryRange.min;
      const rangeMax = queryRange.max;
      const overlappingRanges = await this.routingProvider.getOverlappingRanges(
        this.collectionLink,
        [queryRange],
        this.getDiagnosticNode()
      );
      if (overlappingRanges.length === 0) {
        // Range no longer maps to any partition; skip it.
        continue;
      } else if (overlappingRanges.length === 1) {
        const currentRange = overlappingRanges[0];
        if (currentRange.minInclusive !== rangeMin || currentRange.maxExclusive !== rangeMax) {
          // Token range is a strict sub-range of the current partition: a merge happened.
          await this._handleContinuationTokenMerge(rangeWithToken, currentRange);
          processedRanges.push({
            range: currentRange,
            continuationToken: rangeWithToken.continuationToken,
            epkMin: rangeMin,
            // Original range min becomes EPK min
            epkMax: rangeMax
            // Original range max becomes EPK max
          });
        } else {
          // Topology unchanged for this range.
          processedRanges.push({ range: currentRange, continuationToken: rangeWithToken.continuationToken });
        }
      } else {
        // Token range now spans multiple partitions: a split happened; each
        // child range resumes from the same parent token.
        await this._handleContinuationTokenSplit(rangeWithToken, overlappingRanges);
        overlappingRanges.forEach((rangeValue) => {
          processedRanges.push({ range: rangeValue, continuationToken: rangeWithToken.continuationToken });
        });
      }
    }
    return processedRanges;
  }
  /**
   * Parses the continuation token based on query type
   * @param continuationToken - The continuation token string to parse
   * @returns Parsed continuation token object (ORDER BY or Parallel query token)
   * @throws ErrorResponse when continuation token is malformed or cannot be parsed
   */
  _parseContinuationToken(continuationToken) {
    try {
      return this.queryProcessingStrategy.parseContinuationToken(continuationToken);
    } catch (e) {
      throw new import_ErrorResponse.ErrorResponse(
        `Invalid continuation token format. Expected token with rangeMappings property. 
Ensure the continuation token was generated by a compatible query and has not been modified.`
      );
    }
  }
  /**
   * Handles partition merge scenario for continuation token ranges.
   * NOTE(review): `_newMergedRange` is unused and `newRanges` repeats the OLD
   * range bounds — presumably intentional (the old sub-range is replayed via
   * EPK headers against the merged partition), but worth confirming.
   */
  async _handleContinuationTokenMerge(rangeWithToken, _newMergedRange) {
    const rangeKey = `${rangeWithToken.queryRange.min}-${rangeWithToken.queryRange.max}`;
    this.updatedContinuationRanges.set(rangeKey, {
      oldRange: {
        min: rangeWithToken.queryRange.min,
        max: rangeWithToken.queryRange.max,
        isMinInclusive: true,
        // Assumption: min is always inclusive
        isMaxInclusive: false
        // Assumption: max is always exclusive
      },
      newRanges: [
        {
          min: rangeWithToken.queryRange.min,
          max: rangeWithToken.queryRange.max,
          isMinInclusive: true,
          // Assumption: min is always inclusive
          isMaxInclusive: false
          // Assumption: max is always exclusive
        }
      ],
      continuationToken: rangeWithToken.continuationToken
    });
  }
  /**
   * Handles partition split scenario for continuation token ranges:
   * records the old range and all child ranges under the parent's token.
   */
  async _handleContinuationTokenSplit(rangeWithToken, overlappingRanges) {
    const rangeKey = `${rangeWithToken.queryRange.min}-${rangeWithToken.queryRange.max}`;
    this.updatedContinuationRanges.set(rangeKey, {
      oldRange: {
        min: rangeWithToken.queryRange.min,
        max: rangeWithToken.queryRange.max,
        isMinInclusive: true,
        // Assumption: min is always inclusive
        isMaxInclusive: false
        // Assumption: max is always exclusive
      },
      newRanges: overlappingRanges.map((range) => ({
        min: range.minInclusive,
        max: range.maxExclusive,
        isMinInclusive: true,
        isMaxInclusive: false
      })),
      continuationToken: rangeWithToken.continuationToken
    });
  }
  /**
   * Merges the given per-request headers into the accumulated response headers.
   */
  _mergeWithActiveResponseHeaders(headers) {
    (0, import_headerUtils.mergeHeaders)(this.respHeaders, headers);
  }
  /** Returns the accumulated headers and resets the accumulator. */
  _getAndResetActiveResponseHeaders() {
    const ret = this.respHeaders;
    this.respHeaders = (0, import_headerUtils.getInitialHeader)();
    return ret;
  }
  getDiagnosticNode() {
    return this.diagnosticNodeWrapper.diagnosticNode;
  }
  /** Resolves the query's declared ranges to the currently overlapping partition key ranges. */
  async _onTargetPartitionRanges() {
    const parsedRanges = this.partitionedQueryExecutionInfo.queryRanges;
    const queryRanges = parsedRanges.map((item) => import_QueryRange.QueryRange.parseFromDict(item));
    return this.routingProvider.getOverlappingRanges(
      this.collectionLink,
      queryRanges,
      this.getDiagnosticNode()
    );
  }
  /**
   * Gets the replacement ranges for a partitionkeyrange that has been split.
   * Recreates the routing provider to bypass its stale cache.
   */
  async _getReplacementPartitionKeyRanges(documentProducer, diagnosticNode) {
    const partitionKeyRange = documentProducer.targetPartitionKeyRange;
    this.routingProvider = new import_smartRoutingMapProvider.SmartRoutingMapProvider(this.clientContext);
    const queryRange = import_QueryRange.QueryRange.parsePartitionKeyRange(partitionKeyRange);
    return this.routingProvider.getOverlappingRanges(
      this.collectionLink,
      [queryRange],
      diagnosticNode
    );
  }
  /**
   * Replaces a producer whose partition was split/merged with producers for
   * the current child ranges; rethrows the original error when no replacement exists.
   */
  async _enqueueReplacementDocumentProducers(error, diagnosticNode, documentProducer) {
    const replacementPartitionKeyRanges = await this._getReplacementPartitionKeyRanges(
      documentProducer,
      diagnosticNode
    );
    if (replacementPartitionKeyRanges.length === 0) {
      throw error;
    }
    if (this.requestContinuation) {
      this._updateContinuationTokenOnPartitionChange(
        documentProducer,
        replacementPartitionKeyRanges
      );
    }
    if (replacementPartitionKeyRanges.length === 1) {
      // Merge case: single replacement keeps the old producer's EPK bounds
      // so EPK range headers restrict it to the original sub-range.
      const replacementDocumentProducer = this._createTargetPartitionQueryExecutionContext(
        replacementPartitionKeyRanges[0],
        documentProducer.continuationToken,
        documentProducer.startEpk,
        documentProducer.endEpk,
        true
      );
      this.unfilledDocumentProducersQueue.enq(replacementDocumentProducer);
    } else {
      // Split case: one producer per child range, each bounded by its own range.
      const replacementDocumentProducers = [];
      replacementPartitionKeyRanges.forEach((partitionKeyRange) => {
        const queryRange = import_QueryRange.QueryRange.parsePartitionKeyRange(partitionKeyRange);
        const replacementDocumentProducer = this._createTargetPartitionQueryExecutionContext(
          partitionKeyRange,
          documentProducer.continuationToken,
          queryRange.min,
          queryRange.max,
          false
        );
        replacementDocumentProducers.push(replacementDocumentProducer);
      });
      replacementDocumentProducers.forEach((replacementDocumentProducer) => {
        if (replacementDocumentProducer.hasMoreResults()) {
          this.unfilledDocumentProducersQueue.enq(replacementDocumentProducer);
        }
      });
    }
  }
  /**
   * Records continuation-token bookkeeping after a partition split/merge.
   * NOTE(review): the two handlers are async but their promises are not
   * awaited here; their bodies contain no awaits today, so this is currently
   * benign — confirm if they ever become truly asynchronous.
   */
  _updateContinuationTokenOnPartitionChange(originalDocumentProducer, replacementPartitionKeyRanges) {
    const rangeWithToken = this._createQueryRangeWithContinuationToken(originalDocumentProducer);
    if (replacementPartitionKeyRanges.length === 1) {
      this._handleContinuationTokenMerge(rangeWithToken, replacementPartitionKeyRanges[0]);
    } else {
      this._handleContinuationTokenSplit(rangeWithToken, replacementPartitionKeyRanges);
    }
  }
  /**
   * Creates a QueryRangeWithContinuationToken object from a DocumentProducer.
   * Uses the DocumentProducer's target partition key range and continuation token
   * (preferring the producer's EPK bounds when present).
   * @param documentProducer - The DocumentProducer to convert
   * @returns QueryRangeWithContinuationToken object for token operations
   */
  _createQueryRangeWithContinuationToken(documentProducer) {
    const partitionRange = documentProducer.targetPartitionKeyRange;
    const simplifiedQueryRange = {
      min: documentProducer.startEpk || partitionRange.minInclusive,
      max: documentProducer.endEpk || partitionRange.maxExclusive
    };
    return {
      queryRange: simplifiedQueryRange,
      continuationToken: documentProducer.continuationToken
    };
  }
  /** True when the error is a 410/Gone with the PartitionKeyRangeGone substatus. */
  static _needPartitionKeyRangeCacheRefresh(error) {
    return error.code === import_statusCodes.StatusCodes.Gone && "substatus" in error && error["substatus"] === import_statusCodes.SubStatusCodes.PartitionKeyRangeGone;
  }
  /**
   * Replaces the format placeholder in the rewritten query with the provided filter condition.
   * Handles both string queries and SqlQuerySpec objects; falls back to "true"
   * (a no-op predicate) when no filter condition is given.
   */
  _replaceFormatPlaceholder(rewrittenQuery, formatPlaceHolder, filterCondition) {
    const replacement = filterCondition ?? "true";
    if (typeof rewrittenQuery === "object" && rewrittenQuery !== null && rewrittenQuery.query) {
      return rewrittenQuery.query.replace(formatPlaceHolder, replacement);
    }
    return rewrittenQuery.replace(formatPlaceHolder, replacement);
  }
  /**
   * Creates target partition range Query Execution Context (a DocumentProducer
   * bound to one partition key range, with optional EPK bounds and RID filter).
   */
  _createTargetPartitionQueryExecutionContext(partitionKeyTargetRange, continuationToken, startEpk, endEpk, populateEpkRangeHeaders, filterCondition, filterContext) {
    const rewrittenQuery = this.partitionedQueryExecutionInfo.queryInfo?.rewrittenQuery;
    let sqlQuerySpec;
    const query = this.query;
    if (typeof query === "string") {
      sqlQuerySpec = { query };
    } else {
      sqlQuerySpec = query;
    }
    const formatPlaceHolder = "{documentdb-formattableorderbyquery-filter}";
    if (rewrittenQuery) {
      // Deep-copy so the shared spec is not mutated per partition.
      sqlQuerySpec = JSON.parse(JSON.stringify(sqlQuerySpec));
      const replacedQuery = this._replaceFormatPlaceholder(
        rewrittenQuery,
        formatPlaceHolder,
        filterCondition
      );
      sqlQuerySpec["query"] = replacedQuery;
    }
    const options = { ...this.options };
    options.continuationToken = continuationToken;
    let filter;
    if (filterContext) {
      filter = new import_RidSkipCountFilter.RidSkipCountFilter(filterContext);
    }
    return new import_documentProducer.DocumentProducer(
      this.clientContext,
      this.collectionLink,
      sqlQuerySpec,
      partitionKeyTargetRange,
      options,
      this.correlatedActivityId,
      startEpk,
      endEpk,
      populateEpkRangeHeaders,
      filter
    );
  }
  /**
   * Empties the item buffer and returns its contents (plus partition patches
   * and updated continuation ranges) as a parallel query result, resetting
   * the per-drain bookkeeping. Serialized via the semaphore.
   */
  async drainBufferedItems() {
    return new Promise((resolve, reject) => {
      this.sem.take(() => {
        if (this.err) {
          this.sem.leave();
          this.err.headers = this._getAndResetActiveResponseHeaders();
          reject(this.err);
          return;
        }
        if (this.buffer.length === 0) {
          // Empty buffer: still return (and reset) patch/range bookkeeping;
          // result is undefined only once the context has ended.
          this.sem.leave();
          const partitionDataPatchMap2 = this.partitionDataPatchMap;
          this.partitionDataPatchMap = /* @__PURE__ */ new Map();
          this.patchCounter = 0;
          const updatedContinuationRanges2 = Object.fromEntries(
            this.updatedContinuationRanges
          );
          this.updatedContinuationRanges.clear();
          const result2 = (0, import_parallelQueryResult.createParallelQueryResult)(
            [],
            partitionDataPatchMap2,
            updatedContinuationRanges2,
            void 0
          );
          return resolve({
            result: this.state === ParallelQueryExecutionContextBase.STATES.ended ? void 0 : result2,
            headers: this._getAndResetActiveResponseHeaders()
          });
        }
        const bufferedResults = this.buffer;
        this.buffer = [];
        const partitionDataPatchMap = this.partitionDataPatchMap;
        this.partitionDataPatchMap = /* @__PURE__ */ new Map();
        this.patchCounter = 0;
        const updatedContinuationRanges = Object.fromEntries(
          this.updatedContinuationRanges
        );
        this.updatedContinuationRanges.clear();
        this.sem.leave();
        const result = (0, import_parallelQueryResult.createParallelQueryResult)(
          bufferedResults,
          partitionDataPatchMap,
          updatedContinuationRanges,
          void 0
        );
        return resolve({
          result,
          headers: this._getAndResetActiveResponseHeaders()
        });
      });
    });
  }
  /**
   * Buffers document producers based on the maximum degree of parallelism.
   * Moves document producers from the unfilled queue to the buffered queue.
   * @param diagnosticNode - The diagnostic node for logging and tracing.
   * @returns A promise that resolves when buffering is complete.
   */
  async bufferDocumentProducers(diagnosticNode) {
    return new Promise((resolve, reject) => {
      this.sem.take(async () => {
        if (this.err) {
          this.sem.leave();
          reject(this.err);
          return;
        }
        this.updateStates(this.err);
        if (this.state === ParallelQueryExecutionContextBase.STATES.ended) {
          this.sem.leave();
          resolve();
          return;
        }
        if (this.unfilledDocumentProducersQueue.size() === 0) {
          this.sem.leave();
          resolve();
          return;
        }
        try {
          // Undefined or <1 maxDegreeOfParallelism means "unbounded":
          // buffer every unfilled producer this round.
          const maxDegreeOfParallelism = this.options.maxDegreeOfParallelism === void 0 || this.options.maxDegreeOfParallelism < 1 ? this.unfilledDocumentProducersQueue.size() : Math.min(
            this.options.maxDegreeOfParallelism,
            this.unfilledDocumentProducersQueue.size()
          );
          const documentProducers = [];
          while (documentProducers.length < maxDegreeOfParallelism && this.unfilledDocumentProducersQueue.size() > 0) {
            let documentProducer;
            try {
              documentProducer = this.unfilledDocumentProducersQueue.deq();
            } catch (e) {
              this.err = e;
              this.err.headers = this._getAndResetActiveResponseHeaders();
              reject(this.err);
              return;
            }
            documentProducers.push(documentProducer);
          }
          // Buffers one producer; routes it to the buffered queue (has items),
          // back to the unfilled queue (more results), or records an
          // empty-page patch so the continuation token is not lost.
          const bufferDocumentProducer = async (documentProducer) => {
            try {
              const headers = await documentProducer.bufferMore(diagnosticNode);
              this._mergeWithActiveResponseHeaders(headers);
              const nextItem = documentProducer.peakNextItem();
              if (nextItem !== void 0) {
                this.bufferedDocumentProducersQueue.enq(documentProducer);
              } else {
                if (documentProducer.continuationToken && documentProducer.continuationToken !== "" && documentProducer.continuationToken.toLowerCase() !== "null") {
                  const patchKey = `empty-${documentProducer.targetPartitionKeyRange.id}-${documentProducer.targetPartitionKeyRange.minInclusive}`;
                  this.partitionDataPatchMap.set(patchKey, {
                    itemCount: 0,
                    // 0 items for empty result set
                    partitionKeyRange: documentProducer.targetPartitionKeyRange,
                    continuationToken: documentProducer.continuationToken
                  });
                }
                if (documentProducer.hasMoreResults()) {
                  this.unfilledDocumentProducersQueue.enq(documentProducer);
                }
              }
            } catch (err) {
              if (ParallelQueryExecutionContextBase._needPartitionKeyRangeCacheRefresh(err)) {
                // Partition was split/merged: swap in replacement producers and
                // resolve early (the caller will re-enter to buffer them).
                await this._enqueueReplacementDocumentProducers(
                  err,
                  diagnosticNode,
                  documentProducer
                );
                resolve();
              } else {
                this.err = err;
                this.err.headers = this._getAndResetActiveResponseHeaders();
                reject(err);
              }
            }
          };
          try {
            await Promise.all(
              documentProducers.map((producer) => bufferDocumentProducer(producer))
            );
          } catch (err) {
            this.err = err;
            this.err.headers = this._getAndResetActiveResponseHeaders();
            reject(err);
            return;
          }
          resolve();
        } catch (err) {
          this.err = err;
          this.err.headers = this._getAndResetActiveResponseHeaders();
          reject(err);
        } finally {
          this.sem.leave();
        }
      });
    });
  }
  /**
   * Drains the buffer of filled document producers and appends their items to the main buffer.
   * Uses template method pattern - delegates actual processing to subclasses.
   * @returns A promise that resolves when the buffer is filled.
   */
  async fillBufferFromBufferQueue() {
    return new Promise((resolve, reject) => {
      this.sem.take(async () => {
        if (this.err) {
          this.sem.leave();
          this.err.headers = this._getAndResetActiveResponseHeaders();
          reject(this.err);
          return;
        }
        if (this.state === ParallelQueryExecutionContextBase.STATES.ended || this.bufferedDocumentProducersQueue.size() === 0) {
          this.sem.leave();
          resolve();
          return;
        }
        try {
          await this.processBufferedDocumentProducers();
          this.updateStates(this.err);
        } catch (err) {
          this.err = err;
          this.err.headers = this._getAndResetActiveResponseHeaders();
          reject(this.err);
          return;
        } finally {
          this.sem.leave();
        }
        resolve();
        return;
      });
    });
  }
  /**
   * Advances the lifecycle state: an error ends the context immediately;
   * otherwise started -> inProgress, and the context ends once both
   * producer queues are empty.
   */
  updateStates(error) {
    if (error) {
      this.err = error;
      this.state = ParallelQueryExecutionContextBase.STATES.ended;
      return;
    }
    if (this.state === ParallelQueryExecutionContextBase.STATES.started) {
      this.state = ParallelQueryExecutionContextBase.STATES.inProgress;
    }
    const hasNoActiveProducers = this.unfilledDocumentProducersQueue.size() === 0 && this.bufferedDocumentProducersQueue.size() === 0;
    if (hasNoActiveProducers) {
      this.state = ParallelQueryExecutionContextBase.STATES.ended;
    }
  }
}
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  ParallelQueryExecutionContextBase,
  ParallelQueryExecutionContextBaseStates
});