UNPKG

ravendb

"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.RequestExecutor = exports.NodeStatus = void 0; const SemaphoreUtil_js_1 = require("../Utility/SemaphoreUtil.js"); const LogUtil_js_1 = require("../Utility/LogUtil.js"); const Timer_js_1 = require("../Primitives/Timer.js"); const ServerNode_js_1 = require("./ServerNode.js"); const Topology_js_1 = require("./Topology.js"); const GetDatabaseTopologyCommand_js_1 = require("../ServerWide/Commands/GetDatabaseTopologyCommand.js"); const StatusCode_js_1 = require("./StatusCode.js"); const NodeSelector_js_1 = require("./NodeSelector.js"); const Certificate_js_1 = require("../Auth/Certificate.js"); const HttpCache_js_1 = require("./HttpCache.js"); const index_js_1 = require("../Exceptions/index.js"); const GetClientConfigurationOperation_js_1 = require("../Documents/Operations/Configuration/GetClientConfigurationOperation.js"); const CurrentIndexAndNode_js_1 = require("./CurrentIndexAndNode.js"); const Constants_js_1 = require("../Constants.js"); const PromiseUtil_js_1 = require("../Utility/PromiseUtil.js"); const GetStatisticsOperation_js_1 = require("../Documents/Operations/GetStatisticsOperation.js"); const TypeUtil_js_1 = require("../Utility/TypeUtil.js"); const Serializer_js_1 = require("../Mapping/Json/Serializer.js"); const UriUtil_js_1 = require("../Utility/UriUtil.js"); const StreamUtil_js_1 = require("../Utility/StreamUtil.js"); const HttpUtil_js_1 = require("../Utility/HttpUtil.js"); const PromiseUtil_js_2 = require("../Utility/PromiseUtil.js"); const StringUtil_js_1 = require("../Utility/StringUtil.js"); const node_events_1 = require("node:events"); const SessionEvents_js_1 = require("../Documents/Session/SessionEvents.js"); const TimeUtil_js_1 = require("../Utility/TimeUtil.js"); const UpdateTopologyParameters_js_1 = require("./UpdateTopologyParameters.js"); const node_crypto_1 = require("node:crypto"); const DatabaseHealthCheckOperation_js_1 = require("../Documents/Operations/DatabaseHealthCheckOperation.js"); const GetNodeInfoCommand_js_1 = require("../ServerWide/Commands/GetNodeInfoCommand.js"); const Semaphore_js_1 = require("../Utility/Semaphore.js"); const OsUtil_js_1 = require("../Utility/OsUtil.js"); const ImportUtil_js_1 = require("../Utility/ImportUtil.js"); const DEFAULT_REQUEST_OPTIONS = {}; const log = (0, LogUtil_js_1.getLogger)({ module: "RequestExecutor" }); class IndexAndResponse { index; response; bodyStream; constructor(index, response, bodyStream) { this.index = index; this.response = response; this.bodyStream = bodyStream; } } class NodeStatus { _nodeStatusCallback; _timerPeriodInMs; node; requestExecutor; _timer; constructor(node, requestExecutor, nodeStatusCallback) { this.node = node; this.requestExecutor = requestExecutor; this._timerPeriodInMs = 100; this._nodeStatusCallback = nodeStatusCallback; } _nextTimerPeriod() { if (this._timerPeriodInMs <= 5000) { return 5000; } this._timerPeriodInMs = this._timerPeriodInMs + 100; return this._timerPeriodInMs; } startTimer() { this._timer = new Timer_js_1.Timer(() => { if (this.requestExecutor.disposed) { this.dispose(); return; } return this._nodeStatusCallback(this); }, this._timerPeriodInMs); } updateTimer() { this._timer.change(this._nextTimerPeriod()); } dispose() { this._timer.dispose(); } } exports.NodeStatus = NodeStatus; class RequestExecutor { _emitter = new node_events_1.EventEmitter(); /* we don't initialize this here due to issue with cloudflare see: https://github.com/cloudflare/miniflare/issues/292 */ static 
GLOBAL_APPLICATION_IDENTIFIER = null; static INITIAL_TOPOLOGY_ETAG = -2; _log; static CLIENT_VERSION = "7.1.0"; _updateDatabaseTopologySemaphore = new Semaphore_js_1.Semaphore(); _updateClientConfigurationSemaphore = new Semaphore_js_1.Semaphore(); static _backwardCompatibilityFailureCheckOperation = new GetStatisticsOperation_js_1.GetStatisticsOperation("failure=check"); static _failureCheckOperation = new DatabaseHealthCheckOperation_js_1.DatabaseHealthCheckOperation(); static _useOldFailureCheckOperation = new Set(); _failedNodesTimers = new Map(); _databaseName; _certificate = null; _lastReturnedResponse; _cache; _topologyTakenFromNode; aggressiveCaching = null; _updateTopologyTimer; _nodeSelector; _defaultTimeout; numberOfServerRequests = 0; _disposed; _firstTopologyUpdatePromiseInternal; _httpAgent; /* we don't initialize this here due to issue with cloudflare see: https://github.com/cloudflare/miniflare/issues/292 */ static KEEP_ALIVE_HTTP_AGENT = null; static HTTPS_AGENT_CACHE = new Map(); get firstTopologyUpdatePromise() { return this._firstTopologyUpdatePromiseInternal; } set firstTopologyUpdatePromise(value) { this._firstTopologyUpdatePromiseInternal = value; if (value) { this._firstTopologyUpdateStatus = PromiseUtil_js_2.PromiseStatusTracker.track(value); } } _firstTopologyUpdateStatus; _lastKnownUrls; _clientConfigurationEtag = "0"; _topologyEtag = 0; _conventions; _authOptions; _disableTopologyUpdates; _disableClientConfigurationUpdates; _topologyHeaderName = Constants_js_1.HEADERS.TOPOLOGY_ETAG; _lastServerVersion; _customHttpRequestOptions; _defaultRequestOptions; static requestPostProcessor = null; get customHttpRequestOptions() { return this._customHttpRequestOptions; } set customHttpRequestOptions(value) { this._customHttpRequestOptions = value; this._setDefaultRequestOptions(); } getAuthOptions() { return this._authOptions; } getTopologyEtag() { return this._topologyEtag; } get lastServerVersion() { return this._lastServerVersion; } get defaultTimeout() { return this._defaultTimeout; } set defaultTimeout(timeout) { this._defaultTimeout = timeout; } _secondBroadcastAttemptTimeout; get secondBroadcastAttemptTimeout() { return this._secondBroadcastAttemptTimeout; } set secondBroadcastAttemptTimeout(timeout) { this._secondBroadcastAttemptTimeout = timeout; } _firstBroadcastAttemptTimeout; get firstBroadcastAttemptTimeout() { return this._firstBroadcastAttemptTimeout; } set firstBroadcastAttemptTimeout(timeout) { this._firstBroadcastAttemptTimeout = timeout; } on(event, handler) { this._emitter.on(event, handler); return this; } off(event, handler) { this._emitter.off(event, handler); return this; } _onFailedRequestInvoke(url, e, req, response) { const args = new SessionEvents_js_1.FailedRequestEventArgs(this._databaseName, url, e, req, response); this._emitter.emit("failedRequest", args); } get conventions() { return this._conventions; } getClientConfigurationEtag() { return this._clientConfigurationEtag; } get cache() { return this._cache; } get disposed() { return this._disposed; } getUrl() { if (!this._nodeSelector) { return null; } const preferredNode = this._nodeSelector.getPreferredNode(); return preferredNode ? preferredNode.currentNode.url : null; } getTopology() { return this._nodeSelector ? 
this._nodeSelector.getTopology() : null; } async getHttpAgent() { if (this.conventions.customFetch) { return null; } if (this._httpAgent) { return this._httpAgent; } return this._httpAgent = await this._createHttpAgent(); } async _createHttpAgent() { if (this._certificate) { const agentOptions = this._certificate.toAgentOptions(); const cacheKey = JSON.stringify(agentOptions, null, 0); if (RequestExecutor.HTTPS_AGENT_CACHE.has(cacheKey)) { return RequestExecutor.HTTPS_AGENT_CACHE.get(cacheKey); } else { const agent = await RequestExecutor.createAgent(agentOptions); RequestExecutor.HTTPS_AGENT_CACHE.set(cacheKey, agent); return agent; } } else { return RequestExecutor.KEEP_ALIVE_HTTP_AGENT ??= await RequestExecutor.createAgent({ pipelining: 0 }); } } compressionHeaders(params) { if (this._conventions.useHttpDecompression) { // do nothing - node.js sends 'accept-encoding: gzip, deflate' by default } else { // disable response compression let { headers } = params; if (!headers) { params.headers = headers = {}; } headers["Accept-Encoding"] = "identity"; } } static async createAgent(options) { try { let UndiciAgent; try { const undiciModule = await import((0, ImportUtil_js_1.importFix)("undici")); UndiciAgent = undiciModule.Agent; } catch (err) { const undiciModule = await import("undici"); UndiciAgent = undiciModule.Agent; } if (!UndiciAgent) { throw new Error("Agent not found in undici module"); } return new UndiciAgent(options); } catch (err) { // If we can't import undici - we might be in cloudflare env - simply return no-agent. return null; } } getTopologyNodes() { const topology = this.getTopology(); return topology ? [...topology.nodes] : null; } constructor(database, authOptions, conventions) { this._log = (0, LogUtil_js_1.getLogger)({ module: `${this.constructor.name}-${Math.floor(Math.random() * 10000)}` }); this._cache = new HttpCache_js_1.HttpCache(conventions.maxHttpCacheSize); this._databaseName = database; this._lastReturnedResponse = new Date(); this._conventions = conventions.clone(); this._authOptions = authOptions; this._certificate = Certificate_js_1.Certificate.createFromOptions(this._authOptions); this._setDefaultRequestOptions(); this._defaultTimeout = conventions.requestTimeout; this._secondBroadcastAttemptTimeout = conventions.secondBroadcastAttemptTimeout; this._firstBroadcastAttemptTimeout = conventions.firstBroadcastAttemptTimeout; } static create(initialUrls, database, opts) { const { authOptions, documentConventions } = opts || {}; const executor = new RequestExecutor(database, authOptions, documentConventions); executor.firstTopologyUpdatePromise = executor._firstTopologyUpdate(initialUrls, RequestExecutor.getGlobalApplicationIdentifier()); // this is just to get rid of unhandled rejection, we're handling it later on executor.firstTopologyUpdatePromise.catch(TypeUtil_js_1.TypeUtil.NOOP); return executor; } static getGlobalApplicationIdentifier() { // due to cloudflare constraints we can't init GLOBAL_APPLICATION_IDENTIFIER in static if (!this.GLOBAL_APPLICATION_IDENTIFIER) { this.GLOBAL_APPLICATION_IDENTIFIER = (0, node_crypto_1.randomUUID)(); } return this.GLOBAL_APPLICATION_IDENTIFIER; } static createForSingleNodeWithConfigurationUpdates(url, database, opts) { const executor = this.createForSingleNodeWithoutConfigurationUpdates(url, database, opts); executor._disableClientConfigurationUpdates = false; return executor; } static createForSingleNodeWithoutConfigurationUpdates(url, database, opts) { const { authOptions, documentConventions } = opts; const 
initialUrls = RequestExecutor.validateUrls([url], authOptions); const executor = new RequestExecutor(database, authOptions, documentConventions); const topology = new Topology_js_1.Topology(); topology.etag = -1; const serverNode = new ServerNode_js_1.ServerNode({ url: initialUrls[0], database, serverRole: "Member" }); topology.nodes = [serverNode]; executor._nodeSelector = new NodeSelector_js_1.NodeSelector(topology); executor._topologyEtag = RequestExecutor.INITIAL_TOPOLOGY_ETAG; executor._disableTopologyUpdates = true; executor._disableClientConfigurationUpdates = true; executor.firstTopologyUpdatePromise = executor._singleTopologyUpdateAsync(initialUrls, this.GLOBAL_APPLICATION_IDENTIFIER); return executor; } async _updateClientConfiguration(serverNode) { if (this._disposed) { return; } let semAcquiredContext; try { semAcquiredContext = (0, SemaphoreUtil_js_1.acquireSemaphore)(this._updateClientConfigurationSemaphore); await semAcquiredContext.promise; await this._updateClientConfigurationInternal(serverNode); } finally { if (semAcquiredContext) { semAcquiredContext.dispose(); } } } async _updateClientConfigurationInternal(serverNode) { const oldDisableClientConfigurationUpdates = this._disableClientConfigurationUpdates; this._disableClientConfigurationUpdates = true; try { if (this._disposed) { return; } const command = new GetClientConfigurationOperation_js_1.GetClientConfigurationCommand(); await this.execute(command, null, { chosenNode: serverNode, nodeIndex: null, shouldRetry: false }); const clientConfigOpResult = command.result; if (!clientConfigOpResult) { return; } this._conventions.updateFrom(clientConfigOpResult.configuration); this._clientConfigurationEtag = clientConfigOpResult.etag; } catch (err) { this._log.error(err, "Error getting client configuration."); } finally { this._disableClientConfigurationUpdates = oldDisableClientConfigurationUpdates; } } async updateTopology(parameters) { if (this._disableTopologyUpdates) { return false; } if (this._disposed) { return false; } const acquiredSemContext = (0, SemaphoreUtil_js_1.acquireSemaphore)(this._updateDatabaseTopologySemaphore, { timeout: parameters.timeoutInMs }); try { await acquiredSemContext.promise; if (this._disposed) { return false; } this._log.info(`Update topology from ${parameters.node.url}.`); const getTopology = new GetDatabaseTopologyCommand_js_1.GetDatabaseTopologyCommand(parameters.debugTag, this.conventions.sendApplicationIdentifier ? 
parameters.applicationIdentifier : null); if (this._defaultTimeout != null && this._defaultTimeout > getTopology.timeout) { getTopology.timeout = this._defaultTimeout; } await this.execute(getTopology, null, { chosenNode: parameters.node, nodeIndex: null, shouldRetry: false, }); const topology = getTopology.result; if (!this._nodeSelector) { this._nodeSelector = new NodeSelector_js_1.NodeSelector(topology); if (this.conventions.readBalanceBehavior === "FastestNode") { this._nodeSelector.scheduleSpeedTest(); } } else if (this._nodeSelector.onUpdateTopology(topology, parameters.forceUpdate)) { this._disposeAllFailedNodesTimers(); if (this.conventions.readBalanceBehavior === "FastestNode" && this._nodeSelector.inSpeedTestPhase()) { this._nodeSelector.scheduleSpeedTest(); } } this._topologyEtag = this._nodeSelector.getTopology().etag; this._onTopologyUpdatedInvoke(topology, parameters.debugTag); return true; } catch (reason) { if (reason.name === "TimeoutError") { return false; } throw reason; } finally { acquiredSemContext.dispose(); } } _updateNodeSelector(topology, forceUpdate) { if (!this._nodeSelector) { this._nodeSelector = new NodeSelector_js_1.NodeSelector(topology); if (this.conventions.readBalanceBehavior === "FastestNode") { this._nodeSelector.scheduleSpeedTest(); } } else if (this._nodeSelector.onUpdateTopology(topology, forceUpdate)) { this._disposeAllFailedNodesTimers(); if (this.conventions.readBalanceBehavior === "FastestNode") { this._nodeSelector.scheduleSpeedTest(); } } this._topologyEtag = this._nodeSelector.getTopology().etag; } _disposeAllFailedNodesTimers() { for (const item of this._failedNodesTimers) { item[1].dispose(); } this._failedNodesTimers.clear(); } execute(command, sessionInfo, options) { if (options) { return this._executeOnSpecificNode(command, sessionInfo, options); } this._log.info(`Execute command ${command.constructor.name}`); const topologyUpdate = this.firstTopologyUpdatePromise; const topologyUpdateStatus = this._firstTopologyUpdateStatus; if ((topologyUpdate && topologyUpdateStatus.isResolved())) { const currentIndexAndNode = this.chooseNodeForRequest(command, sessionInfo); return this._executeOnSpecificNode(command, sessionInfo, { chosenNode: currentIndexAndNode.currentNode, nodeIndex: currentIndexAndNode.currentIndex, shouldRetry: true }); } else { return this._unlikelyExecute(command, topologyUpdate, sessionInfo); } } chooseNodeForRequest(cmd, sessionInfo) { if (!StringUtil_js_1.StringUtil.isNullOrWhitespace(cmd.selectedNodeTag)) { const promotables = this._nodeSelector.getTopology().promotables; for (const node of promotables) { if (node.clusterTag === cmd.selectedNodeTag) { return new CurrentIndexAndNode_js_1.default(null, node); } } return this._nodeSelector.getRequestedNode(cmd.selectedNodeTag); } if (this.conventions.loadBalanceBehavior === "UseSessionContext") { if (sessionInfo && sessionInfo.canUseLoadBalanceBehavior()) { return this._nodeSelector.getNodeBySessionId(sessionInfo.getSessionId()); } } if (!cmd.isReadRequest) { return this._nodeSelector.getPreferredNode(); } switch (this.conventions.readBalanceBehavior) { case "None": { return this._nodeSelector.getPreferredNode(); } case "RoundRobin": { return this._nodeSelector.getNodeBySessionId(sessionInfo ? 
sessionInfo.getSessionId() : 0); } case "FastestNode": { return this._nodeSelector.getFastestNode(); } default: { (0, index_js_1.throwError)("NotSupportedException", `Invalid read balance behavior: ${this.conventions.readBalanceBehavior}`); } } } async _unlikelyExecute(command, topologyUpdate, sessionInfo) { await this._waitForTopologyUpdate(topologyUpdate); const currentIndexAndNode = this.chooseNodeForRequest(command, sessionInfo); return this._executeOnSpecificNode(command, sessionInfo, { chosenNode: currentIndexAndNode.currentNode, nodeIndex: currentIndexAndNode.currentIndex, shouldRetry: true }); } async _waitForTopologyUpdate(topologyUpdate) { try { if (!this.firstTopologyUpdatePromise) { if (!this._lastKnownUrls) { // shouldn't happen (0, index_js_1.throwError)("InvalidOperationException", "No known topology and no previously known one, cannot proceed, likely a bug"); } if (!this._disableTopologyUpdates) { topologyUpdate = this._firstTopologyUpdate(this._lastKnownUrls, null); } else { topologyUpdate = this._singleTopologyUpdateAsync(this._lastKnownUrls, null); } } await topologyUpdate; } catch (reason) { if (this.firstTopologyUpdatePromise === topologyUpdate) { this.firstTopologyUpdatePromise = null; // next request will raise it } this._log.warn(reason, "Error doing topology update."); throw reason; } } _updateTopologyCallback() { const time = new Date(); const fiveMinutes = 5 * 60 * 1000; if (time.valueOf() - this._lastReturnedResponse.valueOf() <= fiveMinutes) { return; } let serverNode; try { const selector = this._nodeSelector; if (!selector) { return; } const preferredNode = selector.getPreferredNode(); serverNode = preferredNode.currentNode; } catch (err) { this._log.warn(err, "Couldn't get preferred node Topology from _updateTopologyTimer"); return; } const updateParameters = new UpdateTopologyParameters_js_1.UpdateTopologyParameters(serverNode); updateParameters.timeoutInMs = 0; updateParameters.debugTag = "timer-callback"; return this.updateTopology(updateParameters) .catch(err => { this._log.error(err, "Couldn't update topology from _updateTopologyTimer"); return null; }); } async _singleTopologyUpdateAsync(initialUrls, applicationIdentifier) { if (this.disposed) { return; } // fetch tag for each of the urls const topology = new Topology_js_1.Topology(this._topologyEtag, []); for (const url of initialUrls) { const serverNode = new ServerNode_js_1.ServerNode({ url, database: this._databaseName }); try { const command = new GetNodeInfoCommand_js_1.GetNodeInfoCommand(); await this.execute(command, null, { chosenNode: serverNode, shouldRetry: false, nodeIndex: null }); serverNode.clusterTag = command.result.nodeTag; serverNode.serverRole = command.result.serverRole; } catch (e) { if (e.name === "AuthorizationException") { // auth exceptions will always happen, on all nodes // so errors immediately this._lastKnownUrls = initialUrls; throw e; } else if (e.name === "DatabaseDoesNotExistException") { // Will happen on all node in the cluster, // so errors immediately this._lastKnownUrls = initialUrls; throw e; } else { serverNode.clusterTag = "!"; } } topology.nodes.push(serverNode); this._updateNodeSelector(topology, true); } this._lastKnownUrls = initialUrls; } async _firstTopologyUpdate(inputUrls, applicationIdentifier) { const initialUrls = RequestExecutor.validateUrls(inputUrls, this._authOptions); const topologyUpdateErrors = []; const tryUpdateTopology = async (url, database) => { const serverNode = new ServerNode_js_1.ServerNode({ url, database, serverRole: "Member" }); 
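/* Attempt to fetch the topology from this node: authorization and database-not-found errors are fatal and rethrown, while any other failure is recorded in topologyUpdateErrors so the next url can be tried. */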
try { const updateParameters = new UpdateTopologyParameters_js_1.UpdateTopologyParameters(serverNode); updateParameters.timeoutInMs = TypeUtil_js_1.TypeUtil.MAX_INT32; updateParameters.debugTag = "first-topology-update"; updateParameters.applicationIdentifier = applicationIdentifier; await this.updateTopology(updateParameters); this._initializeUpdateTopologyTimer(); this._topologyTakenFromNode = serverNode; return true; } catch (error) { if (error.name === "AuthorizationException") { this._lastKnownUrls = initialUrls; throw error; } if (error.name === "DatabaseDoesNotExistException") { this._lastKnownUrls = initialUrls; throw error; } topologyUpdateErrors.push({ url, error }); return false; } }; const tryUpdateTopologyOnAllNodes = async () => { for (const url of initialUrls) { if (await tryUpdateTopology(url, this._databaseName)) { return; } } return false; }; await tryUpdateTopologyOnAllNodes(); const topology = new Topology_js_1.Topology(); topology.etag = this._topologyEtag; let topologyNodes = this.getTopologyNodes(); if (!topologyNodes) { topologyNodes = initialUrls.map(url => { const serverNode = new ServerNode_js_1.ServerNode({ url, database: this._databaseName }); serverNode.clusterTag = "!"; return serverNode; }); } topology.nodes = topologyNodes; this._nodeSelector?.dispose(); this._nodeSelector = new NodeSelector_js_1.NodeSelector(topology); if (initialUrls && initialUrls.length > 0) { this._initializeUpdateTopologyTimer(); return; } this._lastKnownUrls = initialUrls; const details = topologyUpdateErrors .map(x => `${x.url} -> ${x.error && x.error.stack ? x.error.stack : x.error}`) .join(", "); this._throwExceptions(details); } _throwExceptions(details) { (0, index_js_1.throwError)("InvalidOperationException", "Failed to retrieve database topology from all known nodes" + OsUtil_js_1.EOL + details); } static validateUrls(initialUrls, authOptions) { const cleanUrls = new Array(initialUrls.length); let requireHttps = !!authOptions?.certificate; for (let index = 0; index < initialUrls.length; index++) { const url = initialUrls[index]; (0, UriUtil_js_1.validateUri)(url); cleanUrls[index] = url.replace(/\/$/, ""); requireHttps = requireHttps || url.startsWith("https://"); } if (!requireHttps) { return cleanUrls; } for (const url of initialUrls) { if (!url.startsWith("http://")) { continue; } if (authOptions && authOptions.certificate) { (0, index_js_1.throwError)("InvalidOperationException", "The url " + url + " is using HTTP, but a certificate is specified, which require us to use HTTPS"); } (0, index_js_1.throwError)("InvalidOperationException", "The url " + url + " is using HTTP, but other urls are using HTTPS, and mixing of HTTP and HTTPS is not allowed."); } return cleanUrls; } _initializeUpdateTopologyTimer() { if (this._updateTopologyTimer || this._disposed) { return; } this._log.info("Initialize update topology timer."); const minInMs = 60 * 1000; this._updateTopologyTimer = new Timer_js_1.Timer(() => this._updateTopologyCallback(), minInMs, minInMs); } async _executeOnSpecificNode(// this method is called `execute` in c# and java code command, sessionInfo = null, options = null) { if (command.failoverTopologyEtag === RequestExecutor.INITIAL_TOPOLOGY_ETAG) { command.failoverTopologyEtag = RequestExecutor.INITIAL_TOPOLOGY_ETAG; if (this._nodeSelector && this._nodeSelector.getTopology()) { const topology = this._nodeSelector.getTopology(); if (topology.etag) { command.failoverTopologyEtag = topology.etag; } } } const { chosenNode, nodeIndex, shouldRetry } = options; 
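/* Build the request for the chosen node, try to serve it from the HTTP cache first, then send it; 304 responses, error responses and retries are handled below. */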
this._log.info(`Actual execute ${command.constructor.name} on ${chosenNode.url}` + ` ${shouldRetry ? "with" : "without"} retry.`); let url; const req = this._createRequest(chosenNode, command, u => url = u); const controller = new AbortController(); if (options?.abortRef) { options.abortRef(controller); } req.signal = controller.signal; const noCaching = sessionInfo ? sessionInfo.noCaching : false; let cachedChangeVector; let cachedValue; const cachedItem = this._getFromCache(command, !noCaching, req.uri.toString(), (cachedItemMetadata) => { cachedChangeVector = cachedItemMetadata.changeVector; cachedValue = cachedItemMetadata.response; }); if (cachedChangeVector) { if (await this._tryGetFromCache(command, cachedItem, cachedValue)) { return; } } this._setRequestHeaders(sessionInfo, cachedChangeVector, req); command.numberOfAttempts++; const attemptNum = command.numberOfAttempts; this._emitter.emit("beforeRequest", new SessionEvents_js_1.BeforeRequestEventArgs(this._databaseName, url, req, attemptNum)); const responseAndStream = await this._sendRequestToServer(chosenNode, nodeIndex, command, shouldRetry, sessionInfo, req, url, controller); if (!responseAndStream) { return; } const response = responseAndStream.response; const bodyStream = responseAndStream.bodyStream; const refreshTask = this._refreshIfNeeded(chosenNode, response); command.statusCode = response.status; if (response.status < 400 || command.statusCode === 304) { command.etag = (0, HttpUtil_js_1.getEtagHeader)(response) ?? cachedChangeVector; } let responseDispose = "Automatic"; try { if (response.status === StatusCode_js_1.StatusCodes.NotModified) { this._emitter.emit("succeedRequest", new SessionEvents_js_1.SucceedRequestEventArgs(this._databaseName, url, response, req, attemptNum)); cachedItem.notModified(); await command.responseBehavior.handleNotModified(command, response, cachedValue); return; } if (response.status >= 400) { const unsuccessfulResponseHandled = await this._handleUnsuccessfulResponse(chosenNode, nodeIndex, command, req, response, bodyStream, req.uri, sessionInfo, shouldRetry); if (!unsuccessfulResponseHandled) { const dbMissingHeader = response.headers.get(Constants_js_1.HEADERS.DATABASE_MISSING); if (dbMissingHeader) { (0, index_js_1.throwError)("DatabaseDoesNotExistException", dbMissingHeader); } this._throwFailedToContactAllNodes(command, req); } return; // we either handled this already in the unsuccessful response or we are throwing } this._emitter.emit("succeedRequest", new SessionEvents_js_1.SucceedRequestEventArgs(this._databaseName, url, response, req, attemptNum)); responseDispose = await command.processResponse(this._cache, response, bodyStream, req.uri); this._lastReturnedResponse = new Date(); } finally { if (responseDispose === "Automatic") { (0, HttpUtil_js_1.closeHttpResponse)(response); } await refreshTask; } } async _refreshIfNeeded(chosenNode, response) { const refreshTopology = response && response.headers && response.headers.get(Constants_js_1.HEADERS.REFRESH_TOPOLOGY); const refreshClientConfiguration = response && response.headers && response.headers.get(Constants_js_1.HEADERS.REFRESH_CLIENT_CONFIGURATION); const tasks = []; if (refreshTopology) { const updateParameters = new UpdateTopologyParameters_js_1.UpdateTopologyParameters(chosenNode); updateParameters.timeoutInMs = 0; updateParameters.debugTag = "refresh-topology-header"; tasks.push(this.updateTopology(updateParameters)); } if (refreshClientConfiguration) { tasks.push(this._updateClientConfiguration(chosenNode)); } await 
Promise.all(tasks); } async _sendRequestToServer(chosenNode, nodeIndex, command, shouldRetry, sessionInfo, request, url, abortController) { try { this.numberOfServerRequests++; const timeout = command.timeout || this._defaultTimeout; if (!TypeUtil_js_1.TypeUtil.isNullOrUndefined(timeout)) { const cancelTask = setTimeout(() => abortController.abort(), timeout); try { return await this._send(chosenNode, command, sessionInfo, request); } catch (error) { if (error.name === "AbortError") { const timeoutException = (0, index_js_1.getError)("TimeoutException", "The request for " + request.uri + " failed with timeout after " + TimeUtil_js_1.TimeUtil.millisToTimeSpan(timeout), error); if (!shouldRetry) { if (!command.failedNodes) { command.failedNodes = new Map(); } command.failedNodes.set(chosenNode, timeoutException); throw timeoutException; } if (!await this._handleServerDown(url, chosenNode, nodeIndex, command, request, null, "", timeoutException, sessionInfo, shouldRetry)) { this._throwFailedToContactAllNodes(command, request); } return null; } throw error; } finally { clearTimeout(cancelTask); } } else { return await this._send(chosenNode, command, sessionInfo, request); } } catch (e) { if (e.name === "AllTopologyNodesDownException") { throw e; } if (e?.cause?.code === "UNABLE_TO_VERIFY_LEAF_SIGNATURE") { if (chosenNode.url.startsWith("https://") && !this.getAuthOptions()?.certificate) { (0, index_js_1.throwError)("AuthorizationException", "This server requires client certificate for authentication, but none was provided by the client.", e); } } // node.js fetch doesn't even send request to server is expected protocol is different from actual, // so we need handle this case differently // https://github.com/nodejs/node/blob/d8c4e375f21b8475d3b717d1d1120ad4eabf8f63/lib/_http_client.js#L157 if (e.code === "ERR_INVALID_PROTOCOL") { if (chosenNode.url.startsWith("https://") && !this.getAuthOptions()?.certificate) { (0, index_js_1.throwError)("AuthorizationException", "This server requires client certificate for authentication, but none was provided by the client.", e); } (0, index_js_1.throwError)("AuthorizationException", "Invalid protocol", e); } if (!shouldRetry) { throw e; } if (!await this._handleServerDown(url, chosenNode, nodeIndex, command, request, null, "", e, sessionInfo, shouldRetry)) { this._throwFailedToContactAllNodes(command, request); } return null; } } async _send(chosenNode, command, sessionInfo, request) { let responseAndStream; if (this._shouldExecuteOnAll(chosenNode, command)) { responseAndStream = await this._executeOnAllToFigureOutTheFastest(chosenNode, command); } else { this.compressionHeaders(request); responseAndStream = await command.send(await this.getHttpAgent(), request); } // PERF: The reason to avoid rechecking every time is that servers wont change so rapidly // and therefore we dimish its cost by orders of magnitude just doing it // once in a while. We dont care also about the potential race conditions that may happen // here mainly because the idea is to have a lax mechanism to recheck that is at least // orders of magnitude faster than currently. 
if (chosenNode.shouldUpdateServerVersion()) { const serverVersion = RequestExecutor._tryGetServerVersion(responseAndStream.response); if (serverVersion) { chosenNode.updateServerVersion(serverVersion); } } this._lastServerVersion = chosenNode.lastServerVersion; if (sessionInfo && sessionInfo.lastClusterTransactionIndex) { // if we reach here it means that sometime a cluster transaction has occurred against this database. // Since the current executed command can be dependent on that, // we have to wait for the cluster transaction. // But we can't do that if the server is an old one. if (this._lastServerVersion && "4.1".localeCompare(this._lastServerVersion) > 0) { (0, index_js_1.throwError)("ClientVersionMismatchException", "The server on " + chosenNode.url + " has an old version and can't perform " + "the command since this command dependent on a cluster transaction " + " which this node doesn't support."); } } return responseAndStream; } _setRequestHeaders(sessionInfo, cachedChangeVector, req) { if (cachedChangeVector) { req.headers[Constants_js_1.HEADERS.IF_NONE_MATCH] = `"${cachedChangeVector}"`; } if (!this._disableClientConfigurationUpdates) { req.headers[Constants_js_1.HEADERS.CLIENT_CONFIGURATION_ETAG] = this._clientConfigurationEtag; } if (sessionInfo && sessionInfo.lastClusterTransactionIndex) { req.headers[Constants_js_1.HEADERS.LAST_KNOWN_CLUSTER_TRANSACTION_INDEX] = sessionInfo.lastClusterTransactionIndex; } if (!this._disableTopologyUpdates) { req.headers[this._topologyHeaderName] = `"${this._topologyEtag}"`; } if (!req.headers[Constants_js_1.HEADERS.CLIENT_VERSION]) { req.headers[Constants_js_1.HEADERS.CLIENT_VERSION] = RequestExecutor.CLIENT_VERSION; } } async _tryGetFromCache(command, cachedItem, cachedValue) { const aggressiveCacheOptions = this.aggressiveCaching; if (aggressiveCacheOptions && cachedItem.age < aggressiveCacheOptions.duration && !cachedItem.mightHaveBeenModified && command.canCacheAggressively) { if (cachedItem.item.flags === "NotFound") { // if this is a cached delete, we only respect it if it _came_ from an aggressively cached // block, otherwise, we'll run the request again return false; } else { await command.setResponseFromCache(cachedValue); return true; } } return false; } static _tryGetServerVersion(response) { return response.headers.get(Constants_js_1.HEADERS.SERVER_VERSION); } _throwFailedToContactAllNodes(command, req) { if (!command.failedNodes || !command.failedNodes.size) { //precaution, should never happen at this point (0, index_js_1.throwError)("InvalidOperationException", "Received unsuccessful response and couldn't recover from it. " + "Also, no record of exceptions per failed nodes. This is weird and should not happen."); } if (command.failedNodes.size === 1) { throw Array.from(command.failedNodes.values())[0]; } let message = "Tried to send " + command.constructor.name + " request via " + (req.method || "GET") + " " + req.uri + " to all configured nodes in the topology, " + "none of the attempt succeeded." + OsUtil_js_1.EOL; if (this._topologyTakenFromNode) { message += "I was able to fetch " + this._topologyTakenFromNode.database + " topology from " + this._topologyTakenFromNode.url + "." 
+ OsUtil_js_1.EOL; } let nodes; if (this._nodeSelector && this._nodeSelector.getTopology()) { nodes = this._nodeSelector.getTopology().nodes; } if (!nodes) { message += "Topology is empty."; } else { message += "Topology: "; for (const node of nodes) { const error = command.failedNodes.get(node); message += OsUtil_js_1.EOL + "[Url: " + node.url + ", " + "ClusterTag: " + node.clusterTag + ", " + "ServerRole: " + node.serverRole + ", " + "Exception: " + (error ? error.message : "No exception") + "]"; } } (0, index_js_1.throwError)("AllTopologyNodesDownException", message); } inSpeedTestPhase() { return this._nodeSelector && this._nodeSelector.inSpeedTestPhase(); } _shouldExecuteOnAll(chosenNode, command) { return this.conventions.readBalanceBehavior === "FastestNode" && this._nodeSelector && this._nodeSelector.inSpeedTestPhase() && this._nodeSelectorHasMultipleNodes() && command.isReadRequest && command.responseType === "Object" && !!chosenNode && !(command["prepareToBroadcast"]); // duck typing: !(command instanceof IBroadcast) } async _executeOnAllToFigureOutTheFastest(chosenNode, command) { let preferredTask = null; const nodes = this._nodeSelector.getTopology().nodes; const tasks = nodes.map(x => null); let task; for (let i = 0; i < nodes.length; i++) { const taskNumber = i; this.numberOfServerRequests++; const agent = await this.getHttpAgent(); task = Promise.resolve() .then(() => { const req = this._createRequest(nodes[taskNumber], command, TypeUtil_js_1.TypeUtil.NOOP); if (!req) { return; } this._setRequestHeaders(null, null, req); this.compressionHeaders(req); return command.send(agent, req); }) .then(commandResult => new IndexAndResponse(taskNumber, commandResult.response, commandResult.bodyStream)) .catch(err => { tasks[taskNumber] = null; throw err; }); if (nodes[i].clusterTag === chosenNode.clusterTag) { preferredTask = task; } tasks[i] = task; } const result = (0, PromiseUtil_js_1.raceToResolution)(tasks) .then(fastest => { this._nodeSelector.recordFastest(fastest.index, nodes[fastest.index]); }) .catch((err) => { this._log.warn(err, "Error executing on all to find fastest node."); }) .then(() => preferredTask); return Promise.resolve(result); } _getFromCache(command, useCache, url, cachedItemMetadataCallback) { if (useCache && command.canCache && command.isReadRequest && command.responseType === "Object") { return this._cache.get(url, cachedItemMetadataCallback); } cachedItemMetadataCallback({ changeVector: null, response: null }); return new HttpCache_js_1.ReleaseCacheItem(null); } _nodeSelectorHasMultipleNodes() { const selector = this._nodeSelector; if (!selector) { return false; } const topology = selector.getTopology(); return topology && topology.nodes && topology.nodes.length > 1; } _createRequest(node, command, urlRef) { const request = command.createRequest(node); if (!request) { return null; } if (this.conventions.customFetch) { request.fetcher = this.conventions.customFetch; } const req = Object.assign(request, this._defaultRequestOptions); urlRef(req.uri); req.headers = req.headers || {}; let builder = new URL(req.uri); if (RequestExecutor.requestPostProcessor) { RequestExecutor.requestPostProcessor(req); } if (command["getRaftUniqueRequestId"]) { const raftCommand = command; builder = RequestExecutor.appendToQuery(builder, "raft-request-id", raftCommand.getRaftUniqueRequestId()); } if (command.selectedNodeTag) { builder = RequestExecutor.appendToQuery(builder, Constants_js_1.QUERY_STRING.NODE_TAG, command.selectedNodeTag); } if 
(!TypeUtil_js_1.TypeUtil.isNullOrUndefined(command.selectedShardNumber)) { builder = RequestExecutor.appendToQuery(builder, Constants_js_1.QUERY_STRING.SHARD_NUMBER, command.selectedShardNumber); } if (this._shouldBroadcast(command)) { command.timeout = command.timeout ?? this.firstBroadcastAttemptTimeout; } req.uri = builder.toString(); return req; } static appendToQuery(builder, key, value) { const joinCharacter = builder.search ? "&" : "?"; return new URL(builder.toString() + joinCharacter + key + "=" + encodeURIComponent(value)); } async _handleUnsuccessfulResponse(chosenNode, nodeIndex, command, req, response, responseBodyStream, url, sessionInfo, shouldRetry) { responseBodyStream.resume(); const readBody = () => (0, StreamUtil_js_1.readToEnd)(responseBodyStream); switch (response.status) { case StatusCode_js_1.StatusCodes.NotFound: { this._cache.setNotFound(url); return command.responseBehavior.tryHandleNotFound(command, response); } case StatusCode_js_1.StatusCodes.Forbidden: { const msg = await readBody(); (0, index_js_1.throwError)("AuthorizationException", `Forbidden access to ${chosenNode.database}@${chosenNode.url}` + `, ${req.method || "GET"} ${req.uri}` + OsUtil_js_1.EOL + msg); break; } case StatusCode_js_1.StatusCodes.Gone: { // request not relevant for the chosen node - the database has been moved to a different one if (!shouldRetry) { return false; } if (nodeIndex != null) { this._nodeSelector.onFailedRequest(nodeIndex); } if (!command.failedNodes) { command.failedNodes = new Map(); } if (command.isFailedWithNode(chosenNode)) { command.failedNodes.set(chosenNode, (0, index_js_1.getError)("UnsuccessfulRequestException", "Request to " + url + "(" + req.method + ") is not relevant for this node anymore.")); } let indexAndNode = this.chooseNodeForRequest(command, sessionInfo); if (command.failedNodes.has(indexAndNode.currentNode)) { // we tried all the nodes, let's try to update topology and retry one more time const updateParameters = new UpdateTopologyParameters_js_1.UpdateTopologyParameters(chosenNode); updateParameters.timeoutInMs = 60_000; updateParameters.debugTag = "handle-unsuccessful-response"; const success = await this.updateTopology(updateParameters);