@microsoft/1ds-post-js
Version:
Microsoft Application Insights JavaScript SDK - 1ds-post-channel-js
873 lines (872 loc) • 58.2 kB
JavaScript
/*
* 1DS JS SDK POST plugin, 4.3.9
* Copyright (c) Microsoft and contributors. All rights reserved.
* (Microsoft Internal Only)
*/
import { __extendsFn as __extends } from "@microsoft/applicationinsights-shims";
/**
* PostManager.ts
* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)
* @copyright Microsoft 2018-2020
*/
import dynamicProto from "@microsoft/dynamicproto-js";
import { BaseTelemetryPlugin, EventsDiscardedReason, _throwInternal, addPageHideEventListener, addPageShowEventListener, addPageUnloadEventListener, arrForEach, createProcessTelemetryContext, createUniqueNamespace, doPerf, getWindow, isChromium, isGreaterThanZero, isNumber, mergeEvtNamespace, objForEachKey, onConfigChange, optimizeObject, proxyFunctions, removePageHideEventListener, removePageShowEventListener, removePageUnloadEventListener, setProcessTelemetryTimings } from "@microsoft/1ds-core-js";
import { createPromise } from "@nevware21/ts-async";
import { isPromiseLike, mathCeil, mathMax, mathMin, objDeepFreeze } from "@nevware21/ts-utils";
import { BE_PROFILE, NRT_PROFILE, RT_PROFILE } from "./DataModels";
import { EventBatch } from "./EventBatch";
import { HttpManager } from "./HttpManager";
import { STR_AUTH_WEB_TOKEN, STR_MSA_DEVICE_TICKET, STR_TRACE, STR_USER } from "./InternalConstants";
import { retryPolicyGetMillisToBackoffForRetry } from "./RetryPolicy";
import { createTimeoutWrapper } from "./TimeoutOverrideWrapper";
import { _DYN_AUTO_FLUSH_EVENTS_LI14, _DYN_BATCHES, _DYN_CAN_SEND_REQUEST, _DYN_CLEAR_TIMEOUT_OVERRI3, _DYN_CONCAT, _DYN_COUNT, _DYN_CREATE_ONE_DSPAYLOAD, _DYN_DISABLE_AUTO_BATCH_F15, _DYN_EVENTS, _DYN_GET_OFFLINE_REQUEST_9, _DYN_GET_WPARAM, _DYN_IDENTIFIER, _DYN_IGNORE_MC1_MS0_COOKI13, _DYN_INITIALIZE, _DYN_I_KEY, _DYN_LATENCY, _DYN_LENGTH, _DYN_OVERRIDE_INSTRUMENTA16, _DYN_PUSH, _DYN_SEND_ATTEMPT, _DYN_SEND_QUEUED_REQUESTS, _DYN_SEND_SYNCHRONOUS_BAT10, _DYN_SERIALIZE_OFFLINE_EV8, _DYN_SET_TIMEOUT_OVERRIDE, _DYN_SET_UNLOADING, _DYN_SPLIT, _DYN_SYNC, _DYN__BACK_OFF_TRANSMISSI12 } from "./__DynamicConstants";
// Polling interval (in seconds) used while waiting for the HttpManager to become completely idle during a flush
var FlushCheckTimer = 0.250; // This needs to be in seconds, so this is 250ms
// Default maximum number of events combined into a single outbound batch (maxEvtPerBatch default)
var MaxNumberEventPerBatch = 500;
// Number of queued events discarded in one pass when a (non-immediate) queue is full
var EventsDroppedAtOneTime = 20;
// Default maximum delivery attempts for an event (maxEventRetryAttempts default)
var MaxSendAttempts = 6;
var MaxSyncUnloadSendAttempts = 2; // Assuming 2 based on beforeunload and unload
// Upper bound for the back-off counter used by _backOffTransmission
var MaxBackoffCount = 4;
// Maximum concurrent connections passed to the HttpManager
var MaxConnections = 2;
// Request retries allowed before the HttpManager triggers a back off
var MaxRequestRetriesBeforeBackoff = 1;
// Default in-memory queued event limit (eventsLimitInMem default)
var MaxEventsLimitInMem = 10000;
// Notification name raised when events are discarded
var strEventsDiscarded = "eventsDiscarded";
var EMPTY_STR = "";
// Shared `undefined` placeholder; aids minification of the defaults below
var undefValue = undefined;
/**
* The default settings for the config.
* WE MUST include all defaults here to ensure that the config is created with all of the properties
* defined as dynamic.
*/
var defaultPostChannelConfig = objDeepFreeze({
// Maximum number of (non-immediate) events held in memory before old events are dropped
eventsLimitInMem: { isVal: isGreaterThanZero, v: MaxEventsLimitInMem },
// Separate queue limit for Immediate latency events
immediateEventLimit: { isVal: isGreaterThanZero, v: 500 },
// Queue size that triggers an automatic flush; 0 disables the queue-size auto flush
autoFlushEventsLimit: { isVal: isGreaterThanZero, v: 0 },
disableAutoBatchFlushLimit: false,
// Custom sender; only accepted when it exposes a sendPOST function (see isOverrideFn)
httpXHROverride: { isVal: isOverrideFn, v: undefValue },
// When set, replaces the iKey on every processed event
overrideInstrumentationKey: undefValue,
overrideEndpointUrl: undefValue,
disableTelemetry: false,
// When true, bit 2 is OR'd into the core's wParam (see _hookWParam)
ignoreMc1Ms0CookieProcessing: false,
setTimeoutOverride: undefValue,
clearTimeoutOverride: undefValue,
payloadPreprocessor: undefValue,
payloadListener: undefValue,
disableEventTimings: undefValue,
valueSanitizer: undefValue,
stringifyObjects: undefValue,
enableCompoundKey: undefValue,
// When true, skips the chromium-only optimizeObject() v8 key-iteration optimization
disableOptimizeObj: false,
fetchCredentials: undefValue,
// disableCacheHeader: undefValue, // See Task #7178858 - Collector requires a change to support this
transports: undefValue,
unloadTransports: undefValue,
useSendBeacon: undefValue,
disableFetchKeepAlive: undefValue,
avoidOptions: false,
xhrTimeout: undefValue,
disableXhrSync: undefValue,
alwaysUseXhrOverride: false,
maxEventRetryAttempts: { isVal: isNumber, v: MaxSendAttempts },
maxUnloadEventRetryAttempts: { isVal: isNumber, v: MaxSyncUnloadSendAttempts },
addNoResponse: undefValue,
maxEvtPerBatch: { isVal: isNumber, v: MaxNumberEventPerBatch },
excludeCsMetaData: undefValue,
requestLimit: {}
});
/**
 * Config validator used for the httpXHROverride default: accepts the value only
 * when it looks like an IXHROverride (an object exposing a sendPOST member).
 * @param httpXHROverride - The candidate override value from the configuration
 * @returns true when the value exposes a truthy sendPOST member, otherwise false
 */
function isOverrideFn(httpXHROverride) {
    // Coerce to a strict boolean -- the previous expression leaked the sendPOST
    // member itself (or null/undefined) as the "is valid" result.
    return !!(httpXHROverride && httpXHROverride.sendPOST);
}
/**
* Class that manages adding events to inbound queues and batching of events
* into requests.
* @group Classes
* @group Entrypoint
*/
var PostChannel = /** @class */ (function (_super) {
__extends(PostChannel, _super);
function PostChannel() {
var _this = _super.call(this) || this;
_this.identifier = "PostChannel";
_this.priority = 1011;
_this.version = '4.3.9';
var _postConfig;
var _isTeardownCalled = false;
var _flushCallbackQueue = [];
var _flushCallbackTimer;
var _paused = false;
var _immediateQueueSize = 0;
var _immediateQueueSizeLimit;
var _queueSize = 0;
var _queueSizeLimit;
var _profiles = {};
var _currentProfile = RT_PROFILE;
var _scheduledTimer;
var _immediateTimer;
var _currentBackoffCount;
var _timerCount;
var _httpManager;
var _batchQueues;
var _autoFlushEventsLimit;
// either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])
var _autoFlushBatchLimit;
var _delayedBatchSendLatency;
var _delayedBatchReason;
var _optimizeObject;
var _isPageUnloadTriggered;
var _maxEventSendAttempts;
var _maxUnloadEventSendAttempts;
var _evtNamespace;
var _timeoutWrapper;
var _ignoreMc1Ms0CookieProcessing;
var _disableAutoBatchFlushLimit;
var _notificationManager;
var _unloadHandlersAdded;
var _overrideInstrumentationKey;
var _disableTelemetry;
var _maxEvtPerBatch;
dynamicProto(PostChannel, _this, function (_self, _base) {
_initDefaults();
// Special internal method to allow the DebugPlugin to hook embedded objects
_self["_getDbgPlgTargets"] = function () {
// Expose the embedded HttpManager and the resolved channel config for the DebugPlugin
return [_httpManager, _postConfig];
};
/**
 * Initializes the channel: resolves the channel's dynamic configuration, hooks
 * page unload / pagehide / pageshow listeners and initializes the embedded
 * HttpManager. The onConfigChange block re-runs whenever watched config values
 * change, re-resolving all cached settings.
 * @param theConfig - The core configuration object
 * @param core - The SDK core instance
 * @param extensions - The other loaded plugins/extensions
 */
_self[_DYN_INITIALIZE /* @min:%2einitialize */] = function (theConfig, core, extensions) {
doPerf(core, function () { return "PostChannel:initialize"; }, function () {
_base[_DYN_INITIALIZE /* @min:%2einitialize */](theConfig, core, extensions);
_notificationManager = core.getNotifyMgr();
try {
_evtNamespace = mergeEvtNamespace(createUniqueNamespace(_self[_DYN_IDENTIFIER /* @min:%2eidentifier */]), core.evtNamespace && core.evtNamespace());
// Re-resolve the channel settings whenever the watched configuration changes
_self._addHook(onConfigChange(theConfig, function (details) {
var coreConfig = details.cfg;
var ctx = createProcessTelemetryContext(null, coreConfig, core);
_postConfig = ctx.getExtCfg(_self[_DYN_IDENTIFIER /* @min:%2eidentifier */], defaultPostChannelConfig);
_timeoutWrapper = createTimeoutWrapper(_postConfig[_DYN_SET_TIMEOUT_OVERRIDE /* @min:%2esetTimeoutOverride */], _postConfig[_DYN_CLEAR_TIMEOUT_OVERRI3 /* @min:%2eclearTimeoutOverride */]);
// Only try and use the optimizeObject() if this appears to be a chromium based browser and it has not been explicitly disabled
_optimizeObject = !_postConfig.disableOptimizeObj && isChromium();
_ignoreMc1Ms0CookieProcessing = _postConfig[_DYN_IGNORE_MC1_MS0_COOKI13 /* @min:%2eignoreMc1Ms0CookieProcessing */];
_hookWParam(core); // _hookWParam uses _ignoreMc1Ms0CookieProcessing
// Cache the configured limits so the hot paths avoid repeated config lookups
_queueSizeLimit = _postConfig.eventsLimitInMem;
_immediateQueueSizeLimit = _postConfig.immediateEventLimit;
_autoFlushEventsLimit = _postConfig[_DYN_AUTO_FLUSH_EVENTS_LI14 /* @min:%2eautoFlushEventsLimit */];
_maxEventSendAttempts = _postConfig.maxEventRetryAttempts;
_maxUnloadEventSendAttempts = _postConfig.maxUnloadEventRetryAttempts;
_disableAutoBatchFlushLimit = _postConfig[_DYN_DISABLE_AUTO_BATCH_F15 /* @min:%2edisableAutoBatchFlushLimit */];
_maxEvtPerBatch = _postConfig.maxEvtPerBatch;
// Pause while the endpoint url is still an unresolved promise
if (isPromiseLike(coreConfig.endpointUrl)) {
_self.pause();
}
else if (!!_paused) {
// if previous url is promise, resume
_self.resume();
}
_setAutoLimits();
// Override iKey if provided in Post config if provided for during initialization
_overrideInstrumentationKey = _postConfig[_DYN_OVERRIDE_INSTRUMENTA16 /* @min:%2eoverrideInstrumentationKey */];
// DisableTelemetry was defined in the config provided during initialization
_disableTelemetry = !!_postConfig.disableTelemetry;
// Re-register the unload handlers so changed exclusions take effect
if (_unloadHandlersAdded) {
_removeUnloadHandlers();
}
var excludePageUnloadEvents = coreConfig.disablePageUnloadEvents || [];
// When running in Web browsers try to send all telemetry if page is unloaded
_unloadHandlersAdded = addPageUnloadEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);
_unloadHandlersAdded = addPageHideEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace) || _unloadHandlersAdded;
_unloadHandlersAdded = addPageShowEventListener(_handleShowEvents, coreConfig.disablePageShowEvents, _evtNamespace) || _unloadHandlersAdded;
}));
// only initialize the manager once
_httpManager[_DYN_INITIALIZE /* @min:%2einitialize */](theConfig, _self.core, _self);
}
catch (e) {
// resetting the initialized state because of failure
_self.setInitialized(false);
throw e;
}
}, function () { return ({ theConfig: theConfig, core: core, extensions: extensions }); });
};
/**
 * Queues a single telemetry event and then forwards it to the next plugin.
 * Events are ignored (but still forwarded) when telemetry is disabled or the
 * channel has been torn down. Once a page unload has been detected new events
 * are released immediately (beacon send) instead of waiting on the timer.
 * @param ev - The telemetry event to process
 * @param itemCtx - Optional telemetry item context
 */
_self.processTelemetry = function (ev, itemCtx) {
setProcessTelemetryTimings(ev, _self[_DYN_IDENTIFIER /* @min:%2eidentifier */]);
itemCtx = itemCtx || _self._getTelCtx(itemCtx);
var event = ev;
if (!_disableTelemetry && !_isTeardownCalled) {
// Override iKey if provided in Post config if provided for during initialization
if (_overrideInstrumentationKey) {
event[_DYN_I_KEY /* @min:%2eiKey */] = _overrideInstrumentationKey;
}
_addEventToQueues(event, true);
if (_isPageUnloadTriggered) {
// Unload event has been received so we need to try and flush new events
_releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);
}
else {
// Normal operation: let the scheduled timer batch and send
_scheduleTimer();
}
}
// Always pass the event along the plugin chain
_self.processNext(event, itemCtx);
};
/**
 * Returns the offline-channel support shim (url / serialize / batch helpers)
 * built on top of the HttpManager, or null when the manager is unavailable or
 * any error occurs (best effort -- errors are deliberately swallowed).
 */
_self.getOfflineSupport = function () {
try {
var details_1 = _httpManager && _httpManager[_DYN_GET_OFFLINE_REQUEST_9 /* @min:%2egetOfflineRequestDetails */]();
if (_httpManager) {
return {
getUrl: function () {
if (details_1) {
return details_1.url;
}
return null;
},
serialize: _serialize,
batch: _batch,
shouldProcess: function (evt) {
// Offline storage is skipped entirely when telemetry is disabled
return !_disableTelemetry;
},
createPayload: function (evt) {
// Per-event payloads are not supported; use createOneDSPayload instead
return null;
},
createOneDSPayload: function (evts) {
if (_httpManager[_DYN_CREATE_ONE_DSPAYLOAD /* @min:%2ecreateOneDSPayload */]) {
return _httpManager[_DYN_CREATE_ONE_DSPAYLOAD /* @min:%2ecreateOneDSPayload */](evts, _optimizeObject);
}
}
};
}
}
catch (e) {
// eslint-disable-next-line no-empty
}
return null;
};
/**
 * Tears the channel down: releases all queued events (beacon send), tears down
 * the HttpManager, removes the unload/show listeners and resets all internal
 * state back to defaults.
 * @param unloadCtx - The unload context (unused here)
 * @param unloadState - The unload state (unused here)
 */
_self._doTeardown = function (unloadCtx, unloadState) {
_releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);
_isTeardownCalled = true;
_httpManager.teardown();
_removeUnloadHandlers();
// Just register to remove all events associated with this namespace
_initDefaults();
};
/**
 * Detaches every page unload / pagehide / pageshow listener that was
 * registered under this channel's event namespace.
 */
function _removeUnloadHandlers() {
    var removers = [removePageUnloadEventListener, removePageHideEventListener, removePageShowEventListener];
    arrForEach(removers, function (removeFn) {
        removeFn(null, _evtNamespace);
    });
}
/**
 * Decorates the core's getWParam so that bit 2 (ignore MC1/MS0 cookie
 * processing) is OR'd into whatever the previous implementation returns.
 * @param core - The SDK core whose getWParam is being wrapped
 */
function _hookWParam(core) {
    var prevGetWParam = core[_DYN_GET_WPARAM /* @min:%2egetWParam */];
    core[_DYN_GET_WPARAM /* @min:%2egetWParam */] = function () {
        // Bit 2 signals that MC1/MS0 cookie processing should be ignored
        var extraBits = _ignoreMc1Ms0CookieProcessing ? 2 : 0;
        return extraBits | prevGetWParam.call(core);
    };
}
/**
 * Joins an array of already-serialized events into a single newline-separated
 * payload string. Null or empty input yields an empty string.
 * @param arr - Array of serialized event strings
 * @returns The combined payload string
 */
function _batch(arr) {
    var payload = "";
    if (arr && arr.length) {
        for (var lp = 0; lp < arr.length; lp++) {
            // A separator is only emitted once some content has accumulated,
            // so leading empty entries never produce a stray newline
            if (payload) {
                payload += "\n";
            }
            payload += arr[lp];
        }
    }
    return payload;
}
/**
 * Serializes a single event for offline storage via the HttpManager, cleaning
 * it first (_cleanEvent). Best effort: any serialization failure is swallowed
 * and an empty string is returned.
 * @param event - The event to serialize (mutated by _cleanEvent)
 * @returns The serialized event, or an empty string on error
 */
function _serialize(event) {
var rlt = EMPTY_STR;
try {
_cleanEvent(event);
rlt = _httpManager[_DYN_SERIALIZE_OFFLINE_EV8 /* @min:%2eserializeOfflineEvt */](event);
}
catch (e) {
// eslint-disable-next-line no-empty
}
return rlt;
}
// Moving event handlers out from the initialize closure so that any local variables can be garbage collected
/**
 * Page unload / pagehide handler: flags the page as unloading (except for the
 * cancellable beforeunload event) and releases all queued events via beacon.
 * @param evt - The DOM event (falls back to window.event for IE 8)
 */
function _handleUnloadEvents(evt) {
var theEvt = evt || getWindow().event; // IE 8 does not pass the event
if (theEvt.type !== "beforeunload") {
// Only set the unload trigger if not beforeunload event as beforeunload can be cancelled while the other events can't
_isPageUnloadTriggered = true;
_httpManager[_DYN_SET_UNLOADING /* @min:%2esetUnloading */](_isPageUnloadTriggered);
}
_releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);
}
/**
 * pageshow handler: the page became visible again (e.g. restored from the
 * back/forward cache), so clear the unloading state on the HttpManager.
 * @param evt - The DOM pageshow event (unused)
 */
function _handleShowEvents(evt) {
// Handle the page becoming visible again
_isPageUnloadTriggered = false;
_httpManager[_DYN_SET_UNLOADING /* @min:%2esetUnloading */](_isPageUnloadTriggered);
}
/**
 * Strips fields that must not be sent/persisted (the trace extension and the
 * user id) and, when enabled, re-creates the event sub-objects via
 * optimizeObject() to speed up v8 key iteration on chromium browsers.
 * @param event - The event to clean (mutated in place)
 */
function _cleanEvent(event) {
    var evtExt = event.ext;
    if (evtExt) {
        if (evtExt[STR_TRACE]) {
            delete (evtExt[STR_TRACE]);
        }
        var userExt = evtExt[STR_USER];
        if (userExt && userExt["id"]) {
            delete (userExt["id"]);
        }
    }
    // v8 performance optimization for iterating over the keys
    if (_optimizeObject) {
        event.ext = optimizeObject(event.ext);
        if (event.baseData) {
            event.baseData = optimizeObject(event.baseData);
        }
        if (event.data) {
            event.data = optimizeObject(event.data);
        }
    }
}
/**
 * Adds an event to the appropriate latency queue, defaulting sendAttempt and
 * latency first and cleaning the event. Synchronous events bypass the queues
 * and go straight to the HttpManager (unless backed off or paused, in which
 * case they are demoted to RealTime latency). When the target queue is full,
 * older events of lower-or-equal latency are dropped to make room; if no room
 * can be made the event is discarded with a QueueFull notification.
 * @param event - The event to queue
 * @param append - True for brand new events (enables auto-flush checks)
 */
function _addEventToQueues(event, append) {
// If send attempt field is undefined we should set it to 0.
if (!event[_DYN_SEND_ATTEMPT /* @min:%2esendAttempt */]) {
event[_DYN_SEND_ATTEMPT /* @min:%2esendAttempt */] = 0;
}
// Add default latency
if (!event[_DYN_LATENCY /* @min:%2elatency */]) {
event[_DYN_LATENCY /* @min:%2elatency */] = 1 /* EventLatencyValue.Normal */;
}
_cleanEvent(event);
if (event[_DYN_SYNC /* @min:%2esync */]) {
// If the transmission is backed off then do not send synchronous events.
// We will convert these events to Real time latency instead.
if (_currentBackoffCount || _paused) {
event[_DYN_LATENCY /* @min:%2elatency */] = 3 /* EventLatencyValue.RealTime */;
event[_DYN_SYNC /* @min:%2esync */] = false;
}
else {
// Log the event synchronously
if (_httpManager) {
// v8 performance optimization for iterating over the keys
if (_optimizeObject) {
event = optimizeObject(event);
}
// Send as a single-event batch immediately and skip all queueing below
_httpManager[_DYN_SEND_SYNCHRONOUS_BAT10 /* @min:%2esendSynchronousBatch */](EventBatch.create(event[_DYN_I_KEY /* @min:%2eiKey */], [event]), event[_DYN_SYNC /* @min:%2esync */] === true ? 1 /* EventSendType.Synchronous */ : event[_DYN_SYNC /* @min:%2esync */], 3 /* SendRequestReason.SyncEvent */);
return;
}
}
}
// Immediate latency events use their own (separate) queue and limit
var evtLatency = event[_DYN_LATENCY /* @min:%2elatency */];
var queueSize = _queueSize;
var queueLimit = _queueSizeLimit;
if (evtLatency === 4 /* EventLatencyValue.Immediate */) {
queueSize = _immediateQueueSize;
queueLimit = _immediateQueueSizeLimit;
}
var eventDropped = false;
// Only add the event if the queue isn't full or it's a direct event (which don't add to the queue sizes)
if (queueSize < queueLimit) {
eventDropped = !_addEventToProperQueue(event, append);
}
else {
var dropLatency = 1 /* EventLatencyValue.Normal */;
var dropNumber = EventsDroppedAtOneTime;
if (evtLatency === 4 /* EventLatencyValue.Immediate */) {
// Only drop other immediate events as they are not technically sharing the general queue
dropLatency = 4 /* EventLatencyValue.Immediate */;
dropNumber = 1;
}
// Drop old event from lower or equal latency
eventDropped = true;
if (_dropEventWithLatencyOrLess(event[_DYN_I_KEY /* @min:%2eiKey */], event[_DYN_LATENCY /* @min:%2elatency */], dropLatency, dropNumber)) {
eventDropped = !_addEventToProperQueue(event, append);
}
}
if (eventDropped) {
// Can't drop events from current queues because the all the slots are taken by queues that are being flushed.
_notifyEvents(strEventsDiscarded, [event], EventsDiscardedReason.QueueFull);
}
}
/**
 * Dynamically updates the in-memory queue limit and the auto-flush limit, then
 * determines whether the new limits require an immediate auto flush (either
 * the queue already exceeds the new limit, or some batch has reached the
 * recomputed per-batch auto-flush size).
 * @param eventLimit - New maximum number of queued events (must be > 0)
 * @param autoFlushLimit - Optional queue size that triggers an auto flush
 */
_self.setEventQueueLimits = function (eventLimit, autoFlushLimit) {
_postConfig.eventsLimitInMem = _queueSizeLimit = isGreaterThanZero(eventLimit) ? eventLimit : MaxEventsLimitInMem;
_postConfig[_DYN_AUTO_FLUSH_EVENTS_LI14 /* @min:%2eautoFlushEventsLimit */] = _autoFlushEventsLimit = isGreaterThanZero(autoFlushLimit) ? autoFlushLimit : 0;
_setAutoLimits();
// We only do this check here as during normal event addition if the queue is > then events start getting dropped
var doFlush = _queueSize > eventLimit;
if (!doFlush && _autoFlushBatchLimit > 0) {
// Check the auto flush max batch size
for (var latency = 1 /* EventLatencyValue.Normal */; !doFlush && latency <= 3 /* EventLatencyValue.RealTime */; latency++) {
var batchQueue = _batchQueues[latency];
if (batchQueue && batchQueue[_DYN_BATCHES /* @min:%2ebatches */]) {
arrForEach(batchQueue[_DYN_BATCHES /* @min:%2ebatches */], function (theBatch) {
if (theBatch && theBatch[_DYN_COUNT /* @min:%2ecount */]() >= _autoFlushBatchLimit) {
// If any 1 batch is > than the limit then trigger an auto flush
doFlush = true;
}
});
}
}
}
_performAutoFlush(true, doFlush);
};
/**
 * Pauses the channel: cancels the scheduled send timer and pauses the
 * HttpManager so no further requests are issued until resume() is called.
 */
_self.pause = function () {
    _clearScheduledTimer();
    _paused = true;
    if (_httpManager) {
        _httpManager.pause();
    }
};
/**
 * Resumes a paused channel: unpauses the HttpManager and restarts the
 * scheduled send timer.
 */
_self.resume = function () {
    _paused = false;
    if (_httpManager) {
        _httpManager.resume();
    }
    _scheduleTimer();
};
/**
 * Replaces the custom transmit profiles (after resetting back to defaults).
 * Each profile value is an array of timer values; only the last 2 (or 3 when
 * an Immediate value is present) entries are kept as
 * [Normal, RealTime, Immediate]. Values are normalised so that: a disabled
 * (< 0) RealTime timer also disables Normal; the Normal timer is rounded up
 * to a multiple of the RealTime timer; and the Immediate timer is capped at
 * the RealTime value. Profiles with fewer than 2 entries are ignored.
 * @param profiles - Map of profile name to timer value arrays
 */
_self._loadTransmitProfiles = function (profiles) {
_resetTransmitProfiles();
objForEachKey(profiles, function (profileName, profileValue) {
var profLen = profileValue[_DYN_LENGTH /* @min:%2elength */];
if (profLen >= 2) {
// Preserve an explicit Immediate timer (3rd from the end) if supplied
var directValue = (profLen > 2 ? profileValue[2] : 0);
profileValue.splice(0, profLen - 2);
// Make sure if a higher latency is set to not send then don't send lower latency
if (profileValue[1] < 0) {
profileValue[0] = -1;
}
// Make sure each latency is multiple of the latency higher then it. If not a multiple
// we round up so that it becomes a multiple.
if (profileValue[1] > 0 && profileValue[0] > 0) {
var timerMultiplier = profileValue[0] / profileValue[1];
profileValue[0] = mathCeil(timerMultiplier) * profileValue[1];
}
// Add back the direct profile timeout
if (directValue >= 0 && profileValue[1] >= 0 && directValue > profileValue[1]) {
// Make sure if it's not disabled (< 0) then make sure it's not larger than RealTime
directValue = profileValue[1];
}
profileValue[_DYN_PUSH /* @min:%2epush */](directValue);
_profiles[profileName] = profileValue;
}
});
};
/**
 * Flushes all queued events. In async mode (default) the queued events are
 * handed to the HttpManager and sent on a zero timeout; when no callback is
 * supplied a promise is created and returned. Concurrent async flush requests
 * are queued and serviced one at a time. In sync mode the events are sent
 * synchronously before returning. No-op while the channel is paused.
 * @param async - Defaults to true; false forces synchronous sending
 * @param callback - Invoked once the flush has completed
 * @param sendReason - Reason code for the send (defaults to ManualFlush)
 * @returns A promise when async and no callback was supplied, otherwise undefined
 */
_self.flush = function (async, callback, sendReason) {
if (async === void 0) { async = true; }
var result;
if (!_paused) {
sendReason = sendReason || 1 /* SendRequestReason.ManualFlush */;
if (async) {
if (!callback) {
result = createPromise(function (resolve) {
// Set the callback to the promise resolve callback
callback = resolve;
});
}
if (_flushCallbackTimer == null) {
// Clear the normal schedule timer as we are going to try and flush ASAP
_clearScheduledTimer();
// Move all queued events to the HttpManager so that we don't discard new events (Auto flush scenario)
_queueBatches(1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, sendReason);
_flushCallbackTimer = _createTimer(function () {
_flushCallbackTimer = null;
_flushImpl(callback, sendReason);
}, 0);
}
else {
// Even if null (no callback) this will ensure after the flushImpl finishes waiting
// for a completely idle connection it will attempt to re-flush any queued events on the next cycle
_flushCallbackQueue[_DYN_PUSH /* @min:%2epush */](callback);
}
}
else {
// Clear the normal schedule timer as we are going to try and flush ASAP
var cleared = _clearScheduledTimer();
// Now cause all queued events to be sent synchronously
_sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, 1 /* EventSendType.Synchronous */, sendReason);
callback && callback();
if (cleared) {
// restart the normal event timer if it was cleared
_scheduleTimer();
}
}
}
return result;
};
/**
 * Sets the MSA device ticket header sent with each request.
 * @param ticket - The MSA device ticket value
 */
_self.setMsaAuthTicket = function (ticket) {
_httpManager.addHeader(STR_MSA_DEVICE_TICKET, ticket);
};
/**
 * Sets the auth plugin (web token) header sent with each request.
 * @param token - The auth token value
 */
_self.setAuthPluginHeader = function (token) {
_httpManager.addHeader(STR_AUTH_WEB_TOKEN, token);
};
/**
 * Removes the header previously set via setAuthPluginHeader.
 */
_self.removeAuthPluginHeader = function () {
_httpManager.removeHeader(STR_AUTH_WEB_TOKEN);
};
// Reports whether any (non-immediate) events are currently queued
_self.hasEvents = _hasEvents;
/**
 * Switches the active transmit profile and restarts the scheduled timer so
 * the new profile's timings take effect. Unknown profile names are ignored,
 * as is re-selecting the currently active profile.
 * @param profileName - The name of a previously loaded (or default) profile
 */
_self._setTransmitProfile = function (profileName) {
    if (_profiles[profileName] === undefined || _currentProfile === profileName) {
        // Unknown profile or no change -- nothing to do
        return;
    }
    _clearScheduledTimer();
    _currentProfile = profileName;
    _scheduleTimer();
};
// Expose the HttpManager's addResponseHandler directly on the channel instance
proxyFunctions(_self, function () { return _httpManager; }, ["addResponseHandler"]);
/**
 * Batch and send events currently in the queue for the given latency and above.
 * @param latency - Minimum latency for which to send events.
 * @param sendType - The EventSendType to use for the requests.
 * @param sendReason - The SendRequestReason to report for the send.
 * @returns true when at least one non-empty batch was queued to the HttpManager.
 */
function _sendEventsForLatencyAndAbove(latency, sendType, sendReason) {
var queued = _queueBatches(latency, sendType, sendReason);
// Always trigger the request as while the post channel may not have queued additional events, the httpManager may already have waiting events
_httpManager[_DYN_SEND_QUEUED_REQUESTS /* @min:%2esendQueuedRequests */](sendType, sendReason);
return queued;
}
// True when any events are waiting in the normal (non-immediate) queues
function _hasEvents() {
return _queueSize > 0;
}
/**
 * Try to schedule the timer after which events will be sent. If there are
 * no events to be sent, or there is already a timer scheduled, or the
 * http manager doesn't have any idle connections this method is no-op.
 * Immediate latency events get their own (shorter) timer; the main timer
 * alternates between RealTime-and-above and Normal-and-above sends based on
 * _timerCount.
 */
function _scheduleTimer() {
// If we had previously attempted to send requests, but the http manager didn't have any idle connections then the requests where delayed
// so try and requeue then again now
if (_delayedBatchSendLatency >= 0 && _queueBatches(_delayedBatchSendLatency, 0 /* EventSendType.Batched */, _delayedBatchReason)) {
_httpManager[_DYN_SEND_QUEUED_REQUESTS /* @min:%2esendQueuedRequests */](0 /* EventSendType.Batched */, _delayedBatchReason);
}
if (_immediateQueueSize > 0 && !_immediateTimer && !_paused) {
// During initialization _profiles enforce that the direct [2] is less than real time [1] timer value
// If the immediateTimeout is disabled the immediate events will be sent with Real Time events
var immediateTimeOut = _profiles[_currentProfile][2];
if (immediateTimeOut >= 0) {
_immediateTimer = _createTimer(function () {
_immediateTimer = null;
// Only try to send direct events
_sendEventsForLatencyAndAbove(4 /* EventLatencyValue.Immediate */, 0 /* EventSendType.Batched */, 1 /* SendRequestReason.NormalSchedule */);
_scheduleTimer();
}, immediateTimeOut);
}
}
// During initialization the _profiles enforce that the normal [0] is a multiple of the real time [1] timer value
var timeOut = _profiles[_currentProfile][1];
if (!_scheduledTimer && !_flushCallbackTimer && timeOut >= 0 && !_paused) {
if (_hasEvents()) {
_scheduledTimer = _createTimer(function () {
_scheduledTimer = null;
// Alternate between RealTime-and-above and Normal-and-above sends
_sendEventsForLatencyAndAbove(_timerCount === 0 ? 3 /* EventLatencyValue.RealTime */ : 1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, 1 /* SendRequestReason.NormalSchedule */);
// Increment the count for next cycle
_timerCount++;
_timerCount %= 2;
_scheduleTimer();
}, timeOut);
}
else {
_timerCount = 0;
}
}
}
/**
 * Increases the transmission back-off level (capped at MaxBackoffCount) and
 * restarts the scheduled timer so the longer back-off delay takes effect.
 */
_self[_DYN__BACK_OFF_TRANSMISSI12 /* @min:%2e_backOffTransmission */] = function () {
    if (_currentBackoffCount >= MaxBackoffCount) {
        // Already at the maximum back-off level
        return;
    }
    _currentBackoffCount++;
    _clearScheduledTimer();
    _scheduleTimer();
};
/**
 * Clears any transmission back-off and restarts the scheduled timer at the
 * normal cadence. No-op when not currently backed off.
 */
_self._clearBackOff = function () {
    if (!_currentBackoffCount) {
        return;
    }
    _currentBackoffCount = 0;
    _clearScheduledTimer();
    _scheduleTimer();
};
/**
 * (Re)initializes all internal state to its defaults, including creating a
 * fresh HttpManager wired to this channel's event callbacks. Called from the
 * constructor closure and again during teardown so a torn-down instance can
 * be re-initialized cleanly.
 */
function _initDefaults() {
_postConfig = null;
_isTeardownCalled = false;
_flushCallbackQueue = [];
_flushCallbackTimer = null;
_paused = false;
_immediateQueueSize = 0;
_immediateQueueSizeLimit = 500;
_queueSize = 0;
_queueSizeLimit = MaxEventsLimitInMem;
_profiles = {};
_currentProfile = RT_PROFILE;
_scheduledTimer = null;
_immediateTimer = null;
_currentBackoffCount = 0;
_timerCount = 0;
_batchQueues = {};
_autoFlushEventsLimit = 0;
_unloadHandlersAdded = false;
// either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])
_autoFlushBatchLimit = 0;
_delayedBatchSendLatency = -1;
_delayedBatchReason = null;
_optimizeObject = true;
_isPageUnloadTriggered = false;
_maxEventSendAttempts = MaxSendAttempts;
_maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;
_evtNamespace = null;
_overrideInstrumentationKey = null;
_maxEvtPerBatch = null;
_disableTelemetry = false;
// Default wrapper; replaced with the configured overrides during initialize
_timeoutWrapper = createTimeoutWrapper();
// httpManager init should use the default value, because _maxEvtPerBatch is null currently
_httpManager = new HttpManager(MaxNumberEventPerBatch, MaxConnections, MaxRequestRetriesBeforeBackoff, {
requeue: _requeueEvents,
send: _sendingEvent,
sent: _eventsSentEvent,
drop: _eventsDropped,
rspFail: _eventsResponseFail,
oth: _otherEvent
});
_initializeProfiles();
_clearQueues();
_setAutoLimits();
}
/**
 * Creates a timer via the timeout wrapper, converting the profile timeout
 * (seconds) into milliseconds. While backed off, the retry policy supplies
 * the multiplier instead, and zero timeouts are stretched to at least one
 * unit so the back-off delay is actually honoured.
 * @param theTimerFunc - The callback to invoke when the timer fires
 * @param timeOut - The timeout in (profile) seconds
 * @returns The timer handle from the timeout wrapper
 */
function _createTimer(theTimerFunc, timeOut) {
    if (timeOut === 0 && _currentBackoffCount) {
        // A backed-off channel must never fire immediately
        timeOut = 1;
    }
    // Seconds -> ms normally, or the back-off delay while backed off
    var timerMultiplier = _currentBackoffCount
        ? retryPolicyGetMillisToBackoffForRetry(_currentBackoffCount - 1)
        : 1000;
    return _timeoutWrapper.set(theTimerFunc, timeOut * timerMultiplier);
}
/**
 * Cancels the scheduled batch-send timer (if any) and resets the alternating
 * timer cycle counter.
 * @returns true when a timer was actually cancelled, otherwise false
 */
function _clearScheduledTimer() {
    if (_scheduledTimer === null) {
        // Nothing scheduled
        return false;
    }
    _scheduledTimer.cancel();
    _scheduledTimer = null;
    _timerCount = 0;
    return true;
}
/**
 * Try to send all queued events using beacons if available: cancels the
 * scheduled and flush timers and pushes every queued event (Normal and above)
 * to the HttpManager with the supplied send type / reason. Nothing is sent
 * while the channel is paused.
 * @param sendType - The EventSendType to use for the requests
 * @param sendReason - The SendRequestReason to report
 */
function _releaseAllQueues(sendType, sendReason) {
    _clearScheduledTimer();
    // Abandon any pending flush callback timer -- the release takes priority
    if (_flushCallbackTimer) {
        _flushCallbackTimer.cancel();
        _flushCallbackTimer = null;
    }
    if (!_paused) {
        // Queue all remaining requests; HTML5 Beacons are used when available
        _sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, sendType, sendReason);
    }
}
/**
 * Add empty queues for all latencies in the inbound queues map. This is called
 * when Transmission Manager is being flushed. This ensures that new events added
 * after flush are stored separately till we flush the current events.
 */
function _clearQueues() {
    // Recreate an empty batch queue for every latency (Normal .. Immediate)
    for (var latency = 1 /* EventLatencyValue.Normal */; latency <= 4 /* EventLatencyValue.Immediate */; latency++) {
        _batchQueues[latency] = {
            batches: [],
            iKeyMap: {}
        };
    }
}
/**
 * Looks up (and optionally creates) the EventBatch for the given iKey within
 * the queue for the requested latency. Unknown latency values fall back to
 * the Normal latency queue.
 * @param iKey - The instrumentation key identifying the batch
 * @param latency - The latency queue to search
 * @param create - When true, a missing batch is created and registered
 * @returns The existing or newly created batch (undefined when absent and create is false)
 */
function _getEventBatch(iKey, latency, create) {
    var theQueue = _batchQueues[latency];
    if (!theQueue) {
        // Unrecognised latencies are stored in the Normal queue
        latency = 1 /* EventLatencyValue.Normal */;
        theQueue = _batchQueues[latency];
    }
    var theBatch = theQueue.iKeyMap[iKey];
    if (!theBatch && create) {
        theBatch = EventBatch.create(iKey);
        theQueue.batches[_DYN_PUSH /* @min:%2epush */](theBatch);
        theQueue.iKeyMap[iKey] = theBatch;
    }
    return theBatch;
}
/**
 * Triggers an auto flush (via flush()) when requested or when the total queue
 * size exceeds the configured autoFlushEventsLimit. Skipped entirely when the
 * HttpManager has no idle connection, transmission is backed off, or a flush
 * is already pending.
 * @param isAsync - Whether the flush should run asynchronously
 * @param doFlush - Whether a flush has already been deemed necessary
 */
function _performAutoFlush(isAsync, doFlush) {
    // Only auto flush with an idle connection and no back off in effect
    if (!_httpManager[_DYN_CAN_SEND_REQUEST /* @min:%2ecanSendRequest */]() || _currentBackoffCount) {
        return;
    }
    if (_autoFlushEventsLimit > 0 && _queueSize > _autoFlushEventsLimit) {
        // The queue has grown past the configured limit -- force flushing
        doFlush = true;
    }
    if (doFlush && _flushCallbackTimer == null) {
        // Auto flush the queue, adding a callback to avoid the creation of a promise
        _self.flush(isAsync, function () { }, 20 /* SendRequestReason.MaxQueuedEvents */);
    }
}
/**
 * Adds the event to the batch for its iKey/latency, updating the relevant
 * queue counter and (for new, non-immediate events) checking whether the
 * batch has grown large enough to force an auto flush.
 * @param event - The event to add
 * @param append - True for new events (false for requeued/retry events)
 * @returns true when the batch accepted the event
 */
function _addEventToProperQueue(event, append) {
// v8 performance optimization for iterating over the keys
if (_optimizeObject) {
event = optimizeObject(event);
}
var latency = event[_DYN_LATENCY /* @min:%2elatency */];
var eventBatch = _getEventBatch(event[_DYN_I_KEY /* @min:%2eiKey */], latency, true);
if (eventBatch.addEvent(event)) {
if (latency !== 4 /* EventLatencyValue.Immediate */) {
_queueSize++;
// Check for auto flushing based on total events in the queue, but not for requeued or retry events
if (append && event[_DYN_SEND_ATTEMPT /* @min:%2esendAttempt */] === 0) {
// Force the flushing of the batch if the batch (specific iKey / latency combination) reaches it's auto flush limit
_performAutoFlush(!event.sync, _autoFlushBatchLimit > 0 && eventBatch[_DYN_COUNT /* @min:%2ecount */]() >= _autoFlushBatchLimit);
}
}
else {
// Direct events don't need auto flushing as they are scheduled (by default) for immediate delivery
_immediateQueueSize++;
}
return true;
}
return false;
}
/**
 * Attempts to free queue space by dropping up to dropNumber of the oldest
 * events, starting from the lowest latency queue (currentLatency) and walking
 * up to the incoming event's latency. Adjusts the relevant queue counter and
 * raises an eventsDiscarded (QueueFull) notification for anything dropped.
 * @param iKey - The instrumentation key whose batches should be searched
 * @param latency - The incoming event's latency (upper bound for the walk)
 * @param currentLatency - The lowest latency to start dropping from
 * @param dropNumber - Maximum number of events to drop in one pass
 * @returns true when at least one event was dropped, otherwise false
 */
function _dropEventWithLatencyOrLess(iKey, latency, currentLatency, dropNumber) {
    while (currentLatency <= latency) {
        // FIX: fetch the batch for the latency currently being inspected
        // (currentLatency) rather than always the event's own latency -- the
        // previous code re-fetched the same batch on every iteration, which
        // contradicts the "drop from lowest possible latency" intent below
        // and made the currentLatency walk (and the Immediate-queue size
        // accounting) ineffective.
        var eventBatch = _getEventBatch(iKey, currentLatency, true);
        if (eventBatch && eventBatch[_DYN_COUNT /* @min:%2ecount */]() > 0) {
            // Dropped oldest events from lowest possible latency
            var droppedEvents = eventBatch[_DYN_SPLIT /* @min:%2esplit */](0, dropNumber);
            var droppedCount = droppedEvents[_DYN_COUNT /* @min:%2ecount */]();
            if (droppedCount > 0) {
                if (currentLatency === 4 /* EventLatencyValue.Immediate */) {
                    _immediateQueueSize -= droppedCount;
                }
                else {
                    _queueSize -= droppedCount;
                }
                _notifyBatchEvents(strEventsDiscarded, [droppedEvents], EventsDiscardedReason.QueueFull);
                return true;
            }
        }
        currentLatency++;
    }
    // Unable to drop any events -- lets just make sure the queue counts are correct to avoid exhaustion
    _resetQueueCounts();
    return false;
}
/**
 * Internal helper to reset the queue counts, used as a backstop to avoid
 * future queue exhaustion errors that might occur because of counting issues.
 * Recomputes both the immediate and the normal queue sizes directly from the
 * events still held in each batch.
 */
function _resetQueueCounts() {
    var immediateTotal = 0;
    var normalTotal = 0;
    // Walk every latency queue and total up the events remaining in each batch
    for (var latency = 1 /* EventLatencyValue.Normal */; latency <= 4 /* EventLatencyValue.Immediate */; latency++) {
        var isImmediate = latency === 4 /* EventLatencyValue.Immediate */;
        var batchQueue = _batchQueues[latency];
        if (batchQueue && batchQueue[_DYN_BATCHES /* @min:%2ebatches */]) {
            // arrForEach executes synchronously, so capturing isImmediate here is safe
            arrForEach(batchQueue[_DYN_BATCHES /* @min:%2ebatches */], function (theBatch) {
                if (isImmediate) {
                    immediateTotal += theBatch[_DYN_COUNT /* @min:%2ecount */]();
                }
                else {
                    normalTotal += theBatch[_DYN_COUNT /* @min:%2ecount */]();
                }
            });
        }
    }
    _queueSize = normalTotal;
    _immediateQueueSize = immediateTotal;
}
/**
 * Moves all batched events at or above the given latency from the channel's
 * internal queues into the HttpManager. Batches the HttpManager rejects
 * (kill switch) are discarded with a KillSwitch notification. When this is an
 * async request and the HttpManager has no idle connection, nothing is moved
 * and the latency/reason are remembered so _scheduleTimer can re-trigger the
 * send later.
 * @param latency - The minimum latency to queue
 * @param sendType - The EventSendType for the request
 * @param sendReason - The SendRequestReason to report
 * @returns true when at least one non-empty batch was handed to the HttpManager
 */
function _queueBatches(latency, sendType, sendReason) {
var eventsQueued = false;
var isAsync = sendType === 0 /* EventSendType.Batched */;
// Only queue batches (to the HttpManager) if this is a sync request or the httpManager has an idle connection
// Thus keeping the events within the PostChannel until the HttpManager has a connection available
// This is so we can drop "old" events if the queue is getting full because we can't successfully send events
if (!isAsync || _httpManager[_DYN_CAN_SEND_REQUEST /* @min:%2ecanSendRequest */]()) {
doPerf(_self.core, function () { return "PostChannel._queueBatches"; }, function () {
var droppedEvents = [];
// Process from the highest (Immediate) latency down to the requested one
var latencyToProcess = 4 /* EventLatencyValue.Immediate */;
while (latencyToProcess >= latency) {
var batchQueue = _batchQueues[latencyToProcess];
if (batchQueue && batchQueue.batches && batchQueue.batches[_DYN_LENGTH /* @min:%2elength */] > 0) {
arrForEach(batchQueue[_DYN_BATCHES /* @min:%2ebatches */], function (theBatch) {
// Add the batch to the http manager to send the requests
if (!_httpManager.addBatch(theBatch)) {
// The events from this iKey are being dropped (killed)
droppedEvents = droppedEvents[_DYN_CONCAT /* @min:%2econcat */](theBatch[_DYN_EVENTS /* @min:%2eevents */]());
}
else {
eventsQueued = eventsQueued || (theBatch && theBatch[_DYN_COUNT /* @min:%2ecount */]() > 0);
}
// Events are no longer held by this channel, so reduce the queue counters
if (latencyToProcess === 4 /* EventLatencyValue.Immediate */) {
_immediateQueueSize -= theBatch[_DYN_COUNT /* @min:%2ecount */]();
}
else {
_queueSize -= theBatch[_DYN_COUNT /* @min:%2ecount */]();
}
});
// Remove all batches from this Queue
batchQueue[_DYN_BATCHES /* @min:%2ebatches */] = [];
batchQueue.iKeyMap = {};
}
latencyToProcess--;
}
if (droppedEvents[_DYN_LENGTH /* @min:%2elength */] > 0) {
_notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.KillSwitch);
}
if (eventsQueued && _delayedBatchSendLatency >= latency) {
// We have queued events at the same level as the delayed values so clear the setting
_delayedBatchSendLatency = -1;
_delayedBatchReason = 0 /* SendRequestReason.Undefined */;
}
}, function () { return ({ latency: latency, sendType: sendType, sendReason: sendReason }); }, !isAsync);
}
else {
// remember the min latency so that we can re-trigger later
_delayedBatchSendLatency = _delayedBatchSendLatency >= 0 ? mathMin(_delayedBatchSendLatency, latency) : latency;
_delayedBatchReason = mathMax(_delayedBatchReason, sendReason);
}
return eventsQueued;
}
/**
 * The callback invoked as part of the manual (async) flushing process: queues
 * any remaining events, waits for the HttpManager to become completely idle,
 * then invokes the callback and services the next queued flush request (or
 * restarts the normal timer schedule when none remain).
 * @param callback - The callback method to call after the flush is complete
 * @param sendReason - The reason why the flush is being called
 */
function _flushImpl(callback, sendReason) {
// Add any additional queued events and cause all queued events to be sent asynchronously
_sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, sendReason);
// All events (should) have been queue -- lets just make sure the queue counts are correct to avoid queue exhaustion (previous bug #9685112)
_resetQueueCounts();
_waitForIdleManager(function () {
// Only called AFTER the httpManager does not have any outstanding requests
if (callback) {
callback();
}
if (_flushCallbackQueue[_DYN_LENGTH /* @min:%2elength */] > 0) {
// Service the next queued flush request on a zero timeout
_flushCallbackTimer = _createTimer(function () {
_flushCallbackTimer = null;
_flushImpl(_flushCallbackQueue.shift(), sendReason);
}, 0);
}
else {
// No more flush requests
_flushCallbackTimer = null;
// Restart the normal timer schedule
_scheduleTimer();
}
});
}
/**
 * Invokes the callback once the HttpManager is completely idle (no requests
 * outstanding); otherwise re-checks on a short (FlushCheckTimer) polling
 * timer stored in _flushCallbackTimer.
 * @param callback - Invoked once the HttpManager reports it is completely idle
 */
function _waitForIdleManager(callback) {
    if (!_httpManager.isCompletelyIdle()) {
        // Still busy -- poll again shortly
        _flushCallbackTimer = _createTimer(function () {
            _flushCallbackTimer = null;
            _waitForIdleManager(callback);
        }, FlushCheckTimer);
        return;
    }
    callback();
}
/**
 * Resets the transmit profiles to the default profiles of Real Time, Near Real Time
 * and Best Effort. This removes all the custom profiles that were loaded.
 */
function _resetTransmitProfiles() {
_clearScheduledTimer();
_initializeProfiles();
// Fall back to the default Real Time profile and restart the send timer
_currentProfile = RT_PROFILE;
_scheduleTimer();
}
function _initializeProfiles() {
_profiles = {};
_profiles[RT_PROFILE] = [2, 1, 0];
_profiles[NRT_PROFILE] = [6, 3, 0];