rx-player

1,320 lines (1,301 loc) 1.56 MB
"use strict"; (() => { var __defProp = Object.defineProperty; var __defProps = Object.defineProperties; var __getOwnPropDescs = Object.getOwnPropertyDescriptors; var __getOwnPropSymbols = Object.getOwnPropertySymbols; var __hasOwnProp = Object.prototype.hasOwnProperty; var __propIsEnum = Object.prototype.propertyIsEnumerable; var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; var __spreadValues = (a, b) => { for (var prop in b || (b = {})) if (__hasOwnProp.call(b, prop)) __defNormalProp(a, prop, b[prop]); if (__getOwnPropSymbols) for (var prop of __getOwnPropSymbols(b)) { if (__propIsEnum.call(b, prop)) __defNormalProp(a, prop, b[prop]); } return a; }; var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b)); // <define:__ENVIRONMENT__> var define_ENVIRONMENT_default = { PRODUCTION: 0, DEV: 1, CURRENT_ENV: 0 }; // <define:__LOGGER_LEVEL__> var define_LOGGER_LEVEL_default = { CURRENT_LEVEL: "NONE" }; // src/utils/is_null_or_undefined.ts function isNullOrUndefined(x) { return x === null || x === void 0; } // src/utils/event_emitter.ts var EventEmitter = class { constructor() { this._listeners = {}; } /** * Register a new callback for an event. * * @param {string} evt - The event to register a callback to * @param {Function} fn - The callback to call as that event is triggered. * The callback will take as argument the eventual payload of the event * (single argument). * @param {Object | undefined} cancellationSignal - When that signal emits, * the event listener is automatically removed. */ addEventListener(evt, fn, cancellationSignal) { const listeners = this._listeners[evt]; if (!Array.isArray(listeners)) { this._listeners[evt] = [fn]; } else { listeners.push(fn); } if (cancellationSignal !== void 0) { cancellationSignal.register(() => { this.removeEventListener(evt, fn); }); } } /** * Unregister callbacks linked to events. * @param {string} [evt] - The event for which the callback[s] should be * unregistered. Set it to null or undefined to remove all callbacks * currently registered (for any event). * @param {Function} [fn] - The callback to unregister. If set to null * or undefined while the evt argument is set, all callbacks linked to that * event will be unregistered. */ removeEventListener(evt, fn) { if (isNullOrUndefined(evt)) { this._listeners = {}; return; } const listeners = this._listeners[evt]; if (!Array.isArray(listeners)) { return; } if (isNullOrUndefined(fn)) { delete this._listeners[evt]; return; } const index = listeners.indexOf(fn); if (index !== -1) { listeners.splice(index, 1); } if (listeners.length === 0) { delete this._listeners[evt]; } } /** * Trigger every registered callbacks for a given event * @param {string} evt - The event to trigger * @param {*} arg - The eventual payload for that event. All triggered * callbacks will recieve this payload as argument. */ trigger(evt, arg) { const listeners = this._listeners[evt]; if (!Array.isArray(listeners)) { return; } listeners.slice().forEach((listener) => { try { listener(arg); } catch (e) { if (define_ENVIRONMENT_default.CURRENT_ENV === define_ENVIRONMENT_default.DEV) { throw e instanceof Error ? e : new Error("EventEmitter: listener error"); } console.error("RxPlayer: EventEmitter error", e instanceof Error ? 
e : null); } }); } }; // src/utils/is_worker.ts var is_worker_default = typeof WorkerGlobalScope !== "undefined" && self instanceof WorkerGlobalScope; // src/utils/is_node.ts var isNode = typeof window === "undefined" && !is_worker_default; var is_node_default = isNode; // src/utils/global_scope.ts var globalScope; if (is_worker_default) { globalScope = self; } else if (is_node_default) { globalScope = global; } else { globalScope = window; } var global_scope_default = globalScope; // src/utils/queue_microtask.ts var queue_microtask_default = typeof queueMicrotask === "function" ? queueMicrotask : function queueMicrotaskPonyfill(cb) { Promise.resolve().then(cb, () => cb()); }; // src/compat/patch_webkit_source_buffer.ts function patchWebkitSourceBuffer() { if (!is_node_default && !isNullOrUndefined( global_scope_default.WebKitSourceBuffer ) && global_scope_default.WebKitSourceBuffer.prototype.addEventListener === void 0) { const sourceBufferWebkitRef = global_scope_default.WebKitSourceBuffer; const sourceBufferWebkitProto = sourceBufferWebkitRef.prototype; for (const fnName in EventEmitter.prototype) { if (Object.prototype.hasOwnProperty.call(EventEmitter.prototype, fnName)) { sourceBufferWebkitProto[fnName] = EventEmitter.prototype[fnName]; } } sourceBufferWebkitProto._listeners = []; sourceBufferWebkitProto._emitUpdate = function(eventName, val) { queue_microtask_default(() => { this.trigger(eventName, val); this.updating = false; this.trigger("updateend", new Event("updateend")); }); }; sourceBufferWebkitProto.appendBuffer = function(data) { var _a2, _b2; if (this.updating) { throw new Error("updating"); } this.trigger("updatestart", new Event("updatestart")); this.updating = true; try { this.append(data); } catch (error) { (_a2 = this._emitUpdate) == null ? void 0 : _a2.call(this, "error", error); return; } (_b2 = this._emitUpdate) == null ? void 0 : _b2.call(this, "update", new Event("update")); }; } } // src/utils/array_find_index.ts function arrayFindIndex(arr, predicate, thisArg) { if (typeof Array.prototype.findIndex === "function") { return arr.findIndex(predicate, thisArg); } const len = arr.length >>> 0; for (let i = 0; i < len; i++) { if (predicate.call(thisArg, arr[i], i, arr)) { return i; } } return -1; } // src/utils/noop.ts function noop_default() { } // src/utils/reference.ts var SharedReference = class { /** * Create a `SharedReference` object encapsulating the mutable `initialValue` * value of type T. * @param {*} initialValue * @param {Object|undefined} [cancelSignal] - If set, the created shared * reference will be automatically "finished" once that signal emits. * Finished references won't be able to update their value anymore, and will * also automatically have their listeners (callbacks linked to value change) * removed - as they cannot be triggered anymore, thus providing a security * against memory leaks. */ constructor(initialValue, cancelSignal) { this._value = initialValue; this._listeners = []; this._isFinished = false; this._onFinishCbs = []; if (cancelSignal !== void 0) { this._deregisterCancellation = cancelSignal.register(() => this.finish()); } } /** * Returns the current value of this shared reference. * @returns {*} */ getValue() { return this._value; } /** * Update the value of this shared reference. 
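// --- Illustrative usage sketch (not part of the bundle) ---------------------
// A minimal example of the EventEmitter defined above. The event name
// ("stateChange") and payload are made up for illustration; they are not
// actual rx-player events.
const emitter = new EventEmitter();
const onStateChange = (newState) => { console.log("state is now", newState); };
emitter.addEventListener("stateChange", onStateChange);
emitter.trigger("stateChange", "LOADED");        // logs: state is now LOADED
emitter.removeEventListener("stateChange", onStateChange);
emitter.trigger("stateChange", "PLAYING");       // no listener left, nothing logged
// -----------------------------------------------------------------------------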
* @param {*} newVal */ setValue(newVal) { if (this._isFinished) { if (define_ENVIRONMENT_default.CURRENT_ENV === define_ENVIRONMENT_default.DEV) { console.error("Finished shared references cannot be updated"); } return; } this._value = newVal; if (this._listeners.length === 0) { return; } const clonedCbs = this._listeners.slice(); for (const cbObj of clonedCbs) { try { if (!cbObj.hasBeenCleared) { cbObj.trigger(newVal, cbObj.complete); } } catch (_) { } } } /** * Update the value of this shared reference only if the value changed. * * Note that this function only performs a strict equality reference through * the "===" operator. Different objects that are structurally the same will * thus be considered different. * @param {*} newVal */ setValueIfChanged(newVal) { if (newVal !== this._value) { this.setValue(newVal); } } /** * Allows to register a callback to be called each time the value inside the * reference is updated. * @param {Function} cb - Callback to be called each time the reference is * updated. Takes as first argument its new value and in second argument a * callback allowing to unregister the callback. * @param {Object} params * @param {Object} params.clearSignal - Allows to provide a CancellationSignal * which will unregister the callback when it emits. * @param {boolean|undefined} [params.emitCurrentValue] - If `true`, the * callback will also be immediately called with the current value. */ onUpdate(cb, params) { const unlisten = () => { if (params.clearSignal !== void 0) { params.clearSignal.deregister(unlisten); } if (cbObj.hasBeenCleared) { return; } cbObj.hasBeenCleared = true; const indexOf = this._listeners.indexOf(cbObj); if (indexOf >= 0) { this._listeners.splice(indexOf, 1); } }; const cbObj = { trigger: cb, complete: unlisten, hasBeenCleared: false }; this._listeners.push(cbObj); if (params.emitCurrentValue === true) { cb(this._value, unlisten); } if (this._isFinished || cbObj.hasBeenCleared) { unlisten(); return; } params.clearSignal.register(unlisten); } /** * Variant of `onUpdate` which will only call the callback once, once the * value inside the reference is different from `undefined`. * The callback is called synchronously if the value already isn't set to * `undefined`. * * This method can be used as a lighter weight alternative to `onUpdate` when * just waiting that the stored value becomes defined. * As such, it is an explicit equivalent to something like: * ```js * myReference.onUpdate((newVal, stopListening) => { * if (newVal !== undefined) { * stopListening(); * * // ... do the logic * } * }, { emitCurrentValue: true }); * ``` * @param {Function} cb - Callback to be called each time the reference is * updated. Takes the new value in argument. * @param {Object} params * @param {Object} params.clearSignal - Allows to provide a * CancellationSignal which will unregister the callback when it emits. */ waitUntilDefined(cb, params) { this.onUpdate( (val, stopListening) => { if (val !== void 0) { stopListening(); cb(this._value); } }, { clearSignal: params.clearSignal, emitCurrentValue: true } ); } /** * Allows to register a callback for when the Shared Reference is "finished". * * This function is mostly there for implementing operators on the shared * reference and isn't meant to be used by regular code, hence it being * prefixed by `_`. * @param {Function} cb - Callback to be called once the reference is * finished. * @param {Object} onFinishCancelSignal - Allows to provide a * CancellationSignal which will unregister the callback when it emits. 
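// --- Illustrative usage sketch (not part of the bundle) ---------------------
// How SharedReference#onUpdate and #setValueIfChanged are meant to be used.
// `clearSignal` is a minimal stand-in for rx-player's CancellationSignal (only
// the `register` and `deregister` methods used by onUpdate are stubbed); real
// code would pass the signal of a cancellation utility instead.
const clearSignal = {
  _cbs: [],
  register(cb) { this._cbs.push(cb); return () => this.deregister(cb); },
  deregister(cb) { this._cbs = this._cbs.filter((c) => c !== cb); },
};
const playbackRate = new SharedReference(1);
playbackRate.onUpdate(
  (newRate, stopListening) => {
    console.log("playback rate is now", newRate);
    if (newRate === 0) { stopListening(); } // unregister once paused
  },
  { clearSignal, emitCurrentValue: true },  // also called immediately with 1
);
playbackRate.setValueIfChanged(2);  // triggers the callback with 2
playbackRate.setValueIfChanged(2);  // same value (===): no callback
// -----------------------------------------------------------------------------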
*/ _onFinished(cb, onFinishCancelSignal) { if (onFinishCancelSignal.isCancelled()) { return noop_default; } const cleanUp = () => { const indexOf = arrayFindIndex(this._onFinishCbs, (x) => x.trigger === trigger); if (indexOf >= 0) { this._onFinishCbs[indexOf].hasBeenCleared = true; this._onFinishCbs.splice(indexOf, 1); } }; const trigger = () => { cleanUp(); cb(); }; const deregisterCancellation = onFinishCancelSignal.register(cleanUp); this._onFinishCbs.push({ trigger, hasBeenCleared: false }); return deregisterCancellation; } /** * Indicate that no new values will be emitted. * Allows to automatically free all listeners linked to this reference. */ finish() { if (this._deregisterCancellation !== void 0) { this._deregisterCancellation(); } this._isFinished = true; const clonedCbs = this._listeners.slice(); for (const cbObj of clonedCbs) { try { if (!cbObj.hasBeenCleared) { cbObj.complete(); cbObj.hasBeenCleared = true; } } catch (_) { } } this._listeners.length = 0; if (this._onFinishCbs.length > 0) { const clonedFinishedCbs = this._onFinishCbs.slice(); for (const cbObj of clonedFinishedCbs) { try { if (!cbObj.hasBeenCleared) { cbObj.trigger(); cbObj.hasBeenCleared = true; } } catch (_) { } } this._onFinishCbs.length = 0; } } }; function createMappedReference(originalRef, mappingFn, cancellationSignal) { const newRef = new SharedReference( mappingFn(originalRef.getValue()), cancellationSignal ); originalRef.onUpdate( function mapOriginalReference(x) { newRef.setValue(mappingFn(x)); }, { clearSignal: cancellationSignal } ); originalRef._onFinished(() => { newRef.finish(); }, cancellationSignal); return newRef; } var reference_default = SharedReference; // src/utils/monotonic_timestamp.ts var mainThreadTimestampDiff = new reference_default(0); var getMonotonicTimeStamp = typeof performance !== "undefined" ? ( // eslint-disable-next-line no-restricted-properties () => performance.now() + mainThreadTimestampDiff.getValue() ) : () => Date.now() + mainThreadTimestampDiff.getValue(); var monotonic_timestamp_default = getMonotonicTimeStamp; // src/utils/logger.ts var DEFAULT_LOG_LEVEL = "NONE"; var Logger = class extends EventEmitter { constructor() { super(); this.error = noop_default; this.warn = noop_default; this.info = noop_default; this.debug = noop_default; this._levels = { NONE: 0, ERROR: 1, WARNING: 2, INFO: 3, DEBUG: 4 }; this._currentFormat = "standard"; this._currentLevel = DEFAULT_LOG_LEVEL; } /** * Update the logger's level to increase or decrease its verbosity, to change * its format with a newly set one, or to update its logging function. * @param {string} levelStr - One of the [upper-case] logger level. If the * given level is not valid, it will default to `"NONE"`. * @param {function|undefined} [logFn] - Optional logger function which will * be called with logs (with the corresponding upper-case logger level as * first argument). * Can be omited to just rely on regular logging functions. 
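// --- Illustrative usage sketch (not part of the bundle) ---------------------
// Deriving a read-only "mapped" reference with createMappedReference (defined
// above). `signal` is again a minimal stand-in for a CancellationSignal, with
// just the methods this code path calls.
const signal = {
  register(cb) { return () => {}; }, // returns a deregister function
  deregister(cb) {},                 // no-op for the sketch
  isCancelled() { return false; },
};
const position = new SharedReference(10);
const flooredPosition = createMappedReference(position, (p) => Math.floor(p), signal);
position.setValue(12.7);
console.log(flooredPosition.getValue()); // 12 — kept in sync through the mapping function
// -----------------------------------------------------------------------------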
*/ setLevel(levelStr, format, logFn) { let level; const foundLevel = this._levels[levelStr]; if (typeof foundLevel === "number") { level = foundLevel; this._currentLevel = levelStr; } else { level = 0; this._currentLevel = "NONE"; } let actualFormat; if (format === "standard" || format === "full") { actualFormat = format; } else { actualFormat = "standard"; } if (actualFormat === "full" && actualFormat !== this._currentFormat) { const now = monotonic_timestamp_default(); console.log(String(now.toFixed(2)), "[Init]", `Local-Date: ${Date.now()}`); } this._currentFormat = actualFormat; const generateLogFn = this._currentFormat === "full" ? (logMethod, consoleFn) => { return (namespace, ...args) => { const now = monotonic_timestamp_default(); return consoleFn( String(now.toFixed(2)), `[${logMethod}]`, namespace + ":", ...args.map( (a) => typeof a === "object" && a !== null && !(a instanceof Error) ? formatContextObject(a) : a ) ); }; } : (_logMethod, consoleFn) => { return (namespace, ...args) => { return consoleFn( namespace + ":", ...args.map( (a) => typeof a === "object" && a !== null && !(a instanceof Error) ? formatContextObject(a) : a ) ); }; }; if (logFn === void 0) { this.error = level >= this._levels.ERROR ? generateLogFn("error", console.error.bind(console)) : noop_default; this.warn = level >= this._levels.WARNING ? generateLogFn("warn", console.warn.bind(console)) : noop_default; this.info = level >= this._levels.INFO ? generateLogFn("info", console.info.bind(console)) : noop_default; this.debug = level >= this._levels.DEBUG ? generateLogFn("log", console.log.bind(console)) : noop_default; } else { const produceLogFn = (logLevel) => { return level >= this._levels[logLevel] ? (namespace, ...args) => { return logFn(logLevel, namespace, args); } : noop_default; }; this.error = produceLogFn("ERROR"); this.warn = produceLogFn("WARNING"); this.info = produceLogFn("INFO"); this.debug = produceLogFn("DEBUG"); } this.trigger("onLogLevelChange", { level: this._currentLevel, format: this._currentFormat }); } /** * Get the last set logger level, as an upper-case string value. * @returns {string} */ getLevel() { return this._currentLevel; } /** * Get the last set logger's log format. * @returns {string} */ getFormat() { return this._currentFormat; } /** * Returns `true` if the currently set level includes logs of the level given * in argument. * @param {string} logLevel * @returns {boolean} */ hasLevel(logLevel) { return this._levels[logLevel] >= this._levels[this._currentLevel]; } }; function formatContextObject(obj) { let ret = ""; for (const key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { if (ret.length > 0) { ret += " "; } const val = obj[key]; if (val instanceof Error) { ret += `${key}="${JSON.stringify(val == null ? void 0 : val.toString())}"`; } else { ret += `${key}=${typeof val === "string" ? `${JSON.stringify(val)}` : String(val)}`; } } } return ret; } // src/log.ts var logger = new Logger(); var log_default = logger; // src/compat/browser_compatibility_types.ts function assertTypeCompatibility() { } assertTypeCompatibility(); assertTypeCompatibility(); assertTypeCompatibility(); assertTypeCompatibility(); assertTypeCompatibility(); assertTypeCompatibility(); assertTypeCompatibility(); var gs = global_scope_default; var _a, _b, _c, _d, _e; var MediaSource_ = (_e = (_d = (_c = (_b = (_a = gs == null ? void 0 : gs.MediaSource) != null ? _a : gs == null ? void 0 : gs.MozMediaSource) != null ? _b : gs == null ? void 0 : gs.WebKitMediaSource) != null ? _c : gs == null ? 
void 0 : gs.MSMediaSource) != null ? _d : gs == null ? void 0 : gs.ManagedMediaSource) != null ? _e : void 0; var isManagedMediaSource = MediaSource_ !== void 0 && MediaSource_ === (gs == null ? void 0 : gs.ManagedMediaSource); // src/compat/is_codec_supported.ts var MAX_SUPPORT_MAP_ENTRIES = 200; var supportMap = /* @__PURE__ */ new Map(); function isCodecSupported(mediaElement, mimeType) { if (mediaElement.isDummy) { return mediaElement.FORCED_MEDIA_SOURCE.isTypeSupported( mimeType ); } if (isNullOrUndefined(MediaSource_)) { if (is_worker_default) { log_default.error("mse", "Cannot request codec support in a worker without MSE."); } return false; } if (typeof MediaSource_.isTypeSupported === "function") { const cachedSupport = supportMap.get(mimeType); if (cachedSupport !== void 0) { return cachedSupport; } else { const isSupported = MediaSource_.isTypeSupported(mimeType); if (supportMap.size >= MAX_SUPPORT_MAP_ENTRIES) { supportMap.clear(); } supportMap.set(mimeType, isSupported); return isSupported; } } return true; } // src/default_config.ts var DEFAULT_CONFIG = { /** * Default time interval after which a request will timeout, in ms. * @type {Number} */ DEFAULT_REQUEST_TIMEOUT: 30 * 1e3, /** * Default connection time after which a request will timeout, in ms. * @type {Number} */ DEFAULT_CONNECTION_TIMEOUT: 15 * 1e3, /** * Can be either: * - "native": Subtitles are all displayed in a <track> element * - "html": Subtitles are all displayed in a <div> separated from the video * element. Can be useful to display richer TTML subtitles, for example. * @type {Object|null} */ DEFAULT_TEXT_TRACK_MODE: "native", /** * Default behavior for the `enableFastSwitching` loadVideo options. * * Fast-switching allows to provide quicker transitions from lower quality * segments to higher quality segments but might be badly supported on some * devices. * When enabled, the RxPlayer might replace segments of a lower-quality * (with a lower bitrate) with segments of a higher quality (with a higher * bitrate). This allows to have a fast transition when network conditions * improve. * When disabled, segments of a lower-quality will not be replaced. */ DEFAULT_ENABLE_FAST_SWITCHING: true, /** * In some cases after switching the current track or bitrate, the RxPlayer * could be led to go into the `"RELOADING"` state, which corresponds to * visually a black screen (with nothing audible) before restarting playback. * * We could want to seek back some milliseconds when doing that. * For example, when switching the current audio track, it might make sense * to restart some time before, so the beginning of the sentence can be heard * again in the new language. * * This config property allows to set the relative position the RxPlayer will * seek to after reloading, in seconds. * * For example: a value of `-0.7` means that will seek back 700 milliseconds * when reloading due to a track or bitrate switch with necessitated a * reloading. */ DELTA_POSITION_AFTER_RELOAD: { /** Relative position when switching the bitrate */ bitrateSwitch: -0.1, /** * Relative position when switching the track. * * From tests, I noticed that seeking back was only really "pleasant" when * switching the audio track. * * E.g. switching the video track often means changing the camera angle or * even totally changing what is being seen and rely much less on temporal * context than when an audio track is switched. 
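// --- Illustrative usage sketch (not part of the bundle) ---------------------
// How the bounded codec-support cache above behaves, assuming a browser
// environment where `MediaSource.isTypeSupported` exists. `mediaElementLike`
// is a stand-in: only the `isDummy` flag read by isCodecSupported is provided.
const mediaElementLike = { isDummy: false };
const mime = 'video/mp4;codecs="avc1.640028"';
isCodecSupported(mediaElementLike, mime); // asks MediaSource.isTypeSupported, caches the answer
isCodecSupported(mediaElementLike, mime); // answered from `supportMap`; MSE is not queried again
// (once 200 distinct MIME strings are cached, the whole map is cleared before new entries are added)
// -----------------------------------------------------------------------------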
* As such, I decided to only set a sensible seek-back behavior when * switching the audio track, and only a minimal one (to still ensure * nothing was missed) for video. * * "Other" mainly concern text track, where seeking back could even be * annoying, so that behavior has been disabled in that case. */ trackSwitch: { audio: 0, video: 0, other: 0 } }, /** * Behavior of the RxPlayer when encountering a whole other codec on a already * existing audio or video SourceBuffer. * * Can be either: * * - "continue": Segments linked to the new codec will continue to be * pushed to that same SourceBuffer. The RxPlayer will still try to call * the `changeType` API on the SourceBuffer before pushing those * segments but continue even if this call failed. * * - "reload": Every time a new incompatible codec is encountered on a * given SourceBuffer, we will reload the MediaSource. */ DEFAULT_CODEC_SWITCHING_BEHAVIOR: "continue", /** * Specifies the behavior when all audio tracks are not playable. * * - If set to `"continue"`, the player will proceed to play the content without audio. * - If set to `"error"`, an error will be thrown to indicate that the audio tracks could not be played. * * Note: If neither the audio nor the video tracks are playable, an error will be thrown regardless of this setting. */ DEFAULT_AUDIO_TRACKS_NOT_PLAYABLE_BEHAVIOR: "error", /** * Specifies the behavior when all video tracks are not playable. * * - If set to `"continue"`, the player will proceed to play the content without video. * - If set to `"error"`, an error will be thrown to indicate that the video tracks could not be played. * * Note: If neither the audio nor the video tracks are playable, an error will be thrown regardless of this setting. */ DEFAULT_VIDEO_TRACKS_NOT_PLAYABLE_BEHAVIOR: "error", /** * If set to true, video through loadVideo will auto play by default * @type {Boolean} */ DEFAULT_AUTO_PLAY: false, /** * Default buffer goal in seconds. * Once enough content has been downloaded to fill the buffer up to * ``current position + DEFAULT_WANTED_BUFFER_AHEAD", we will stop downloading * content. * @type {Number} */ DEFAULT_WANTED_BUFFER_AHEAD: 30, /** * Default max buffer size ahead of the current position in seconds. * The buffer _after_ this limit will be garbage collected. * Set to Infinity for no limit. * @type {Number} */ DEFAULT_MAX_BUFFER_AHEAD: Infinity, /** * Default max buffer size ahead of the current position in seconds. * The buffer _before_ this limit will be garbage collected. * Set to Infinity for no limit. * @type {Number} */ DEFAULT_MAX_BUFFER_BEHIND: Infinity, /** * Default video buffer memory limit in kilobytes. * Once enough video content has been downloaded to fill the buffer up to * DEFAULT_MAX_VIDEO_BUFFER_SIZE , we will stop downloading * content. * @type {Number} */ DEFAULT_MAX_VIDEO_BUFFER_SIZE: Infinity, /** * Maximum possible buffer ahead for each type of buffer, to avoid too much * memory usage when playing for a long time. * Equal to Infinity if not defined here. * @type {Object} */ MAXIMUM_MAX_BUFFER_AHEAD: { text: 5 * 60 * 60 }, /** * Minimum possible buffer ahead for each type of buffer, to avoid Garbage * Collecting too much data when it would have adverse effects. * Equal to `0` if not defined here. * @type {Object} */ MINIMUM_MAX_BUFFER_AHEAD: { // Text segments are both much lighter on resources and might // actually be much larger than other types of segments in terms // of duration. 
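// --- Illustrative sketch (not part of the bundle) ---------------------------
// The "buffer goal" rule described for DEFAULT_WANTED_BUFFER_AHEAD above:
// segments keep being downloaded until `current position + wanted buffer
// ahead` is buffered. This helper only restates that rule; it is not the
// rx-player implementation.
function hasReachedBufferGoal(currentPosition, bufferedEnd, wantedBufferAhead = 30) {
  return bufferedEnd - currentPosition >= wantedBufferAhead;
}
hasReachedBufferGoal(100, 125); // false: only 25s are buffered ahead, keep loading
hasReachedBufferGoal(100, 131); // true: 31s >= 30s, the goal is reached
// -----------------------------------------------------------------------------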
Let's make an exception here by authorizing a // larger text buffer ahead, to avoid unnecesarily reloading the // same text track. text: 2 * 60 }, /** * Maximum possible buffer behind for each type of buffer, to avoid too much * memory usage when playing for a long time. * Equal to Infinity if not defined here. * @type {Object} */ MAXIMUM_MAX_BUFFER_BEHIND: { text: 5 * 60 * 60 }, /** * Default bitrate ceils initially set as the first content begins. * * If no track is found with a bitrate inferior or equal to the * bitrate there, the one with the lowest bitrate will be taken instead. * * Set to 0 for the lowest bitrate, Infinity for the highest. * * These values are only useful for the first content played, as consecutive * play will always take the last set one. * @type {Object} */ DEFAULT_BASE_BANDWIDTH: 0, /** * Delay after which, if the page is hidden, the user is considered inactive * on the current video. * * Allow to enforce specific optimizations when the page is not shown. * @see DEFAULT_THROTTLE_WHEN_HIDDEN * @type {Number} */ INACTIVITY_DELAY: 60 * 1e3, /** * If true, if the video is considered in a "hidden" state for a delay specified by * the INACTIVITY DELAY config property, we throttle automatically to the video * representation with the lowest bitrate. * @type {Boolean} */ DEFAULT_THROTTLE_VIDEO_BITRATE_WHEN_HIDDEN: false, /** * Default video resolution limit behavior. * * This option allows for example to throttle the video resolution so it * does not exceed the screen resolution. * * Here set to "none" by default to disable throttling. * @type {Boolean} */ DEFAULT_VIDEO_RESOLUTION_LIMIT: "none", /** * Default initial live gap considered if no presentation delay has been * suggested, in seconds. * @type {Number} */ DEFAULT_LIVE_GAP: { DEFAULT: 10, LOW_LATENCY: 3.5 }, /** * Maximum time, in seconds, the player should automatically skip when stalled * because of a current hole in the buffer. * Bear in mind that this might seek over not-yet-downloaded/pushed segments. * @type {Number} */ BUFFER_DISCONTINUITY_THRESHOLD: 0.2, /** * Ratio used to know if an already loaded segment should be re-buffered. * We re-load the given segment if the current one times that ratio is * inferior to the new one. * @type {Number} */ BITRATE_REBUFFERING_RATIO: 1.5, /** * The default number of times a manifest request will be re-performed * when loaded/refreshed if the request finishes on an error which * justify an retry. * * Note that some errors do not use this counter: * - if the error is not due to the xhr, no retry will be peformed * - if the error is an HTTP error code, but not a 500-smthg or a 404, no * retry will be performed. * @type Number */ DEFAULT_MAX_MANIFEST_REQUEST_RETRY: 4, /** * Default delay, in seconds, during which a CDN will be "downgraded". * * For example in case of media content being available on multiple CDNs, the * RxPlayer may decide that a CDN is less reliable (for example, it returned a * server error) and should thus be avoided, at least for some time * * This value is the amount of time this CDN will be "less considered" than the * alternatives. */ DEFAULT_CDN_DOWNGRADE_TIME: 60, /** * The default number of times a segment request will be re-performed when * on error which justify a retry. * * Note that some errors do not use this counter: * - if the error is not due to the xhr, no retry will be peformed * - if the error is an HTTP error code, but not a 500-smthg or a 404, no * retry will be performed. 
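// --- Illustrative sketch (not part of the bundle) ---------------------------
// The BITRATE_REBUFFERING_RATIO rule described above: an already-buffered
// segment is only replaced when the new quality's bitrate exceeds the current
// one times that ratio. Restated here for clarity, not rx-player code.
const BITRATE_REBUFFERING_RATIO = 1.5;
function shouldReplaceSegment(currentBitrate, newBitrate) {
  return currentBitrate * BITRATE_REBUFFERING_RATIO < newBitrate;
}
shouldReplaceSegment(1e6, 1.4e6); // false: not a big enough quality jump
shouldReplaceSegment(1e6, 2e6);   // true: worth re-downloading in the higher quality
// -----------------------------------------------------------------------------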
* @type Number */ DEFAULT_MAX_REQUESTS_RETRY_ON_ERROR: 4, /** * Initial backoff delay when a segment / manifest download fails, in * milliseconds. * * This delay will then grow exponentally by power of twos (200, 400, 800 * etc.) * * Please note that this delay is not exact, as it will be fuzzed. * @type {Number} */ INITIAL_BACKOFF_DELAY_BASE: { REGULAR: 200, LOW_LATENCY: 50 }, /** * Maximum backoff delay when a segment / manifest download fails, in * milliseconds. * * Please note that this delay is not exact, as it will be fuzzed. * @type {Number} */ MAX_BACKOFF_DELAY_BASE: { REGULAR: 3e3, LOW_LATENCY: 1e3 }, /** * Minimum interval at which playback information samples will be taken. This * variable is for the "regular" mediasource strategy (that is, not for the * directfile API. * * At each of these interval, various different modules in the RxPlayer will * run based on the information communicated. * * Keep in mind this is the minimum interval. This logic will also be * triggered when various events of the media element are received. * @type {Number} */ SAMPLING_INTERVAL_MEDIASOURCE: 1e3, /** * Same than SAMPLING_INTERVAL_MEDIASOURCE but for lowLatency mode. * @type {Number} */ SAMPLING_INTERVAL_LOW_LATENCY: 500, /** * Same than SAMPLING_INTERVAL_MEDIASOURCE but for the directfile API. * @type {Number} */ SAMPLING_INTERVAL_NO_MEDIASOURCE: 500, /** * Amount of buffer to have ahead of the current position before we may * consider buffer-based adaptive estimates, in seconds. * * For example setting it to `10` means that we need to have ten seconds of * buffer ahead of the current position before relying on buffer-based * adaptive estimates. * * To avoid getting in-and-out of the buffer-based logic all the time, it * should be set higher than `ABR_EXIT_BUFFER_BASED_ALGO`. */ ABR_ENTER_BUFFER_BASED_ALGO: 10, /** * Below this amount of buffer ahead of the current position, in seconds, we * will stop using buffer-based estimate in our adaptive logic to select a * quality. * * For example setting it to `5` means that if we have less than 5 seconds of * buffer ahead of the current position, we should stop relying on * buffer-based estimates to choose a quality. * * To avoid getting in-and-out of the buffer-based logic all the time, it * should be set lower than `ABR_ENTER_BUFFER_BASED_ALGO`. */ ABR_EXIT_BUFFER_BASED_ALGO: 5, /** * Minimum number of bytes sampled before we trust the estimate. * If we have not sampled much data, our estimate may not be accurate * enough to trust. * If the total of bytes sampled is less than this value, we use a * default estimate. * This specific value is based on experimentations. * @type {Number} */ ABR_MINIMUM_TOTAL_BYTES: 15e4, /** * Minimum number of bytes, under which samples are discarded. * Our models do not include latency information, so connection startup time * (time to first byte) is considered part of the download time. * Because of this, we should ignore very small downloads which would cause * our estimate to be too low. * This specific value is based on experimentation. * @type {Number} */ ABR_MINIMUM_CHUNK_SIZE: 16e3, /** * Factor with which is multiplied the bandwidth estimate when the ABR is in * starvation mode. * @type {Object} */ ABR_STARVATION_FACTOR: { DEFAULT: 0.72, LOW_LATENCY: 0.72 }, /** * Factor with which is multiplied the bandwidth estimate when the ABR is not * in starvation mode. 
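// --- Illustrative sketch (not part of the bundle) ---------------------------
// The retry-delay growth described for INITIAL_BACKOFF_DELAY_BASE /
// MAX_BACKOFF_DELAY_BASE above: the base delay doubles at each retry and is
// capped. The actual rx-player code also "fuzzes" the result, which is not
// reproduced here.
function getBackoffDelay(retryCount, base = 200, max = 3000) {
  return Math.min(base * Math.pow(2, retryCount), max);
}
[0, 1, 2, 3, 4].map((n) => getBackoffDelay(n)); // [200, 400, 800, 1600, 3000]
// -----------------------------------------------------------------------------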
* @type {Object} */ ABR_REGULAR_FACTOR: { DEFAULT: 0.72, LOW_LATENCY: 0.72 }, /** * If a media buffer has less than ABR_STARVATION_GAP in seconds ahead of the * current position in its buffer, the adaptive logic will go into starvation * mode. * * It gets out of starvation mode when the OUT_OF_STARVATION_GAP value is * reached. * * Under this starvation mode: * * - the bandwidth considered will be a little lower than the one estimated * * - the time the next important request take will be checked * multiple times to detect when/if it takes too much time. * If the request is considered too long, the bitrate will be hastily * re-calculated from this single request. * * @type {Object} */ ABR_STARVATION_GAP: { DEFAULT: 5, LOW_LATENCY: 5 }, OUT_OF_STARVATION_GAP: { DEFAULT: 7, LOW_LATENCY: 7 }, /** * This is a security to avoid going into starvation mode when the content is * ending (@see ABR_STARVATION_GAP). * Basically, we subtract that value from the global duration of the content * and we never enter "starvation mode" if the currently available buffer * (which equals to the current position + the available buffer ahead of it) * is equal or higher than this value. * @type {Number} */ ABR_STARVATION_DURATION_DELTA: 0.1, /** * Half-life, in seconds for a fastly-evolving exponential weighted moving * average. * The lower it is, the faster the ABR logic will react to the bandwidth * falling quickly. * Should be kept to a lower number than ABR_SLOW_EMA for coherency reasons. * @type {Number} */ ABR_FAST_EMA: 2, /** * Half-life, in seconds for a slowly-evolving exponential weighted moving * average. * The lower it is, the faster the ABR logic is going to react to recent * bandwidth variation, on the higher and on the lower side. * Should be kept to a higher number than ABR_FAST_EMA for coherency reasons. * @type {Number} */ ABR_SLOW_EMA: 10, /** * Number of seconds ahead in the buffer after which playback will resume when * seeking on an unbuffered part of the content. * @type {Number} */ RESUME_GAP_AFTER_SEEKING: { DEFAULT: 1.5, LOW_LATENCY: 0.5 }, /** * Number of seconds ahead in the buffer after which playback will resume when * the player was rebuffering due to a low readyState. * @type {Number} */ RESUME_GAP_AFTER_NOT_ENOUGH_DATA: { DEFAULT: 0.5, LOW_LATENCY: 0.5 }, /** * Number of seconds ahead in the buffer after which playback will resume * after the player went through a buffering step. * @type {Number} */ RESUME_GAP_AFTER_BUFFERING: { DEFAULT: 5, LOW_LATENCY: 0.5 }, /** * Maximum number of seconds in the buffer based on which a "rebuffering" * strategy will be considered: * The player will pause playback to get enough time building a sufficient * buffer. This mostly happen when seeking in an unbuffered part or when not * enough buffer is ahead of the current position. * @type {Number} */ REBUFFERING_GAP: { DEFAULT: 0.5, LOW_LATENCY: 0.2 }, /** * Amount of time (in seconds) with data ahead of the current position, at * which we always consider the browser to be able to play. * * If the media element has this amount of data in advance or more but * playback cannot begin, the player will consider it "freezing". */ MINIMUM_BUFFER_AMOUNT_BEFORE_FREEZING: 2, /** * A media whose position inexplicably does not increment despite playing is * called as "freezing" in the RxPlayer. * * If the media is still "freezing" after waiting for `UNFREEZING_SEEK_DELAY` * milliseconds, the RxPlayer will try to un-freeze the situation by interacting * with the media element. 
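// --- Illustrative sketch (not part of the bundle) ---------------------------
// What a half-life based exponentially weighted moving average (as referenced
// by ABR_FAST_EMA / ABR_SLOW_EMA above) computes. The weighting below is a
// common textbook formulation and is not taken from the rx-player source.
function updateEwma(previousEstimate, newSample, sampleDurationSec, halfLifeSec) {
  const weight = Math.pow(0.5, sampleDurationSec / halfLifeSec);
  return weight * previousEstimate + (1 - weight) * newSample;
}
// A 2s half-life (ABR_FAST_EMA) reacts to a bandwidth drop faster than a 10s one:
updateEwma(5e6, 1e6, 2, 2);  // 3_000_000
updateEwma(5e6, 1e6, 2, 10); // ≈ 4_480_000 (reacts more slowly)
// -----------------------------------------------------------------------------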
* * Those interactions can be costly in time before playback continue, so it * should be set at a sufficiently high value to avoid false positives. */ UNFREEZING_SEEK_DELAY: 6e3, /** * A media whose position inexplicably does not increment despite playing is * called as "freezing" in the RxPlayer. * * A small freezing interval may be normal as the browser may take time before * playing, e.g. after a seek. * * If the media is still "freezing" after waiting for `FREEZING_STALLED_DELAY` * milliseconds, the RxPlayer will emit a BUFFERING state through its API to * notify that the player cannot currently advance. */ FREEZING_STALLED_DELAY: 600, /** * A media whose position inexplicably does not increment despite playing is * called as "freezing" in the RxPlayer. * * If the media is frozen for a sufficiently large time * (@see UNFREEZING_SEEK_DELAY), the RxPlayer will perform a seek corresponding * to its current position plus `UNFREEZING_DELTA_POSITION` seconds. * * This should be kept short enough as the goal is just to un-freeze lower-level * buffers. */ UNFREEZING_DELTA_POSITION: 1e-3, /** * `FREEZING` is a situation where the playback does not seem to advance despite * all web indicators telling us we can. * Those may be linked to device issues, but sometimes are just linked to * performance or it may be just decryption negotiations taking more time than * expected. * * Anyway we might in the RxPlayer "flush" the buffer in that situation to * un-stuck playback (this is usually done by seeking close to the current * position), * * Yet that "flush" attempt may not in the end be succesful. * * If a flush was performed more than `FREEZING_FLUSH_FAILURE_DELAY.MINIMUM` * milliseconds ago and less than `FREEZING_FLUSH_FAILURE_DELAY.MAXIMUM` * milliseconds ago, yet a `FREEZING` situation at roughly the same playback * position (deviating from less than * `FREEZING_FLUSH_FAILURE_DELAY.POSITION_DELTA` seconds from it) is * encountered again, we will consider that the flushing attempt was unsuccesful * and try more agressive solutions (such as reloading the content). */ FREEZING_FLUSH_FAILURE_DELAY: { MAXIMUM: 2e4, MINIMUM: 4e3, POSITION_DELTA: 1 }, /** * The RxPlayer has a recurring logic which will synchronize the browser's * buffers' buffered time ranges with its internal representation in the * RxPlayer to then rely on that internal representation to determine where * segments are technically present in the browser's buffer. * * We found out that when inserting a new segment to the buffer, the browser * may actually take time before actually considering the full segment in its * advertised buffered time ranges. * * This value thus set an amount of milliseconds we might want to wait before * being sure that the buffered time ranges should have considered a segment * that has been pushed. */ SEGMENT_SYNCHRONIZATION_DELAY: 1500, /** * The `SEGMENT_SYNCHRONIZATION_DELAY` defined in this same configuration * object only needs to be used if it appears that the current buffered * time ranges do not reflect the full data of a pushed segment yet. * * The `MISSING_DATA_TRIGGER_SYNC_DELAY` value thus allows to define a * minimum time difference in seconds between what's buffered and what the * segment's ranges should have been, from which we might consider that we may * want to wait the `SEGMENT_SYNCHRONIZATION_DELAY` before trusting the buffered * time ranges for that segment. 
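// --- Illustrative sketch (not part of the bundle) ---------------------------
// A restatement of the FREEZING_FLUSH_FAILURE_DELAY rule described above: a
// previous "flush" is considered to have failed if playback freezes again
// close to the same position, within that time window. Not the actual
// rx-player logic.
const FREEZING_FLUSH_FAILURE_DELAY = { MAXIMUM: 2e4, MINIMUM: 4e3, POSITION_DELTA: 1 };
function flushSeemsUnsuccessful(msSinceLastFlush, flushPosition, currentPosition) {
  return (
    msSinceLastFlush > FREEZING_FLUSH_FAILURE_DELAY.MINIMUM &&
    msSinceLastFlush < FREEZING_FLUSH_FAILURE_DELAY.MAXIMUM &&
    Math.abs(currentPosition - flushPosition) < FREEZING_FLUSH_FAILURE_DELAY.POSITION_DELTA
  );
}
flushSeemsUnsuccessful(6000, 42.0, 42.3); // true: frozen again near the same position
flushSeemsUnsuccessful(6000, 42.0, 50.0); // false: playback has progressed since the flush
// -----------------------------------------------------------------------------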
* If what's missing from that segment is however less than that value in * seconds, we can begin to trust the reported buffered time ranges. * * Should generally be inferior to `MAX_TIME_MISSING_FROM_COMPLETE_SEGMENT`. */ MISSING_DATA_TRIGGER_SYNC_DELAY: 0.1, /** * Maximum authorized difference between what we calculated to be the * beginning or end of the segment in a media buffer and what we * actually are noticing now. * * If the segment seems to have removed more than this size in seconds, we * will infer that the segment has been garbage collected and we might try to * re-download it. * @type {Number} */ MAX_TIME_MISSING_FROM_COMPLETE_SEGMENT: 0.15, /** * The maximum authorized difference, in seconds, between the real buffered * time of a given chunk and what the segment information of the Manifest * tells us. * * Setting a value too high can lead to parts of the media buffer being * linked to the wrong segments and to segments wrongly believed to be still * complete (instead of garbage collected). * * Setting a value too low can lead to parts of the media buffer not being * linked to the concerned segment and to segments wrongly believed to be * partly garbage collected (instead of complete segments). * @type {Number} */ MAX_MANIFEST_BUFFERED_START_END_DIFFERENCE: 0.4, /** * The maximum authorized difference, in seconds, between the duration a * segment should have according to the Manifest and the actual duration it * seems to have once pushed to the media buffer. * * Setting a value too high can lead to parts of the media buffer being * linked to the wrong segments and to segments wrongly believed to be still * complete (instead of garbage collected). * * Setting a value too low can lead to parts of the media buffer not being * linked to the concerned segment and to segments wrongly believed to be * partly garbage collected (instead of complete segments). This last point * could lead to unnecessary segment re-downloading. * @type {Number} */ MAX_MANIFEST_BUFFERED_DURATION_DIFFERENCE: 0.3, /** * Minimum duration in seconds a segment should be into a buffered range to be * considered as part of that range. * Segments which have less than this amount of time "linked" to a buffered * range will be deleted. * * Setting a value too low can lead in worst-case scenarios to segments being * wrongly linked to the next or previous range it is truly linked too (if * those ranges are too close). * * Setting a value too high can lead to part of the buffer not being assigned * any segment. It also limits the minimum duration a segment can be. * * TODO As of now, this limits the minimum size a complete segment can be. A * better logic would be to also consider the duration of a segment. Though * this logic could lead to bugs with the current code. * @type {Number} */ MINIMUM_SEGMENT_SIZE: 1e-3, /** * Append windows allow to filter media data from segments if they are outside * a given limit. * Coded frames with presentation timestamp within this range are allowed to * be appended to the media buffer while coded frames outside this range are * filtered out. * * Those are often set to be the start and end of the "Period" the segment is * in. * However, we noticed that some browsers were too aggressive when the exact * limits were set: more data than needed was removed, often leading to * discontinuities. * * Those securities are added to the set windows (substracted from the window * start and added to the window end) to avoid those problems. 
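// --- Illustrative sketch (not part of the bundle) ---------------------------
// The garbage-collection inference described for
// MAX_TIME_MISSING_FROM_COMPLETE_SEGMENT above: if noticeably more of a
// segment is missing at its start or end than that threshold, assume the
// browser garbage-collected it. Restated for clarity, not rx-player code.
const MAX_TIME_MISSING_FROM_COMPLETE_SEGMENT = 0.15;
function looksGarbageCollected(expectedStart, expectedEnd, bufferedStart, bufferedEnd) {
  return (
    bufferedStart - expectedStart > MAX_TIME_MISSING_FROM_COMPLETE_SEGMENT ||
    expectedEnd - bufferedEnd > MAX_TIME_MISSING_FROM_COMPLETE_SEGMENT
  );
}
looksGarbageCollected(10, 14, 10.05, 14); // false: only 50ms shaved off the start
looksGarbageCollected(10, 14, 10.5, 14);  // true: 500ms vanished, likely garbage-collected
// -----------------------------------------------------------------------------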
* @type {Object} */ APPEND_WINDOW_SECURITIES: { START: 0.2, END: 0.1 }, /** * Maximum interval at which text tracks are refreshed in an "html" * textTrackMode. * * The text tracks are also refreshed on various video events, this interval * will only trigger a refresh if none of those events was received during * that timespan. * * Note that if the TextTrack cue did not change between two intervals or * events, the DOM won't be refreshed. * The TextTrack cues structure is also optimized for fast retrieval. * We should thus not have much of a performance impact here if we set a low * interval. * * @type {Number} */ MAXIMUM_HTML_TEXT_TRACK_UPDATE_INTERVAL: 50, /** * On browsers with no ResizeObserver API, this will be the interval in * milliseconds at which we should check if the text track element has * changed its size, and updates proportional text-track data accordingly * (like a proportional font-size). * * This is only used: * - in an "html" textTrackMode * - when some styling is proportional in the text track data * * Putting a value too low will render faster but might use to much proc time. * Putting a value too high might provoke a re-render too late after the user * changed the element's size (e.g. when going to fullscreen mode). * * @type {Number} */ TEXT_TRACK_SIZE_CHECKS_INTERVAL: 250, /** * The Buffer padding is a time offset from the current time that affects * the buffer. * * Basically, from a given time, if the current buffer gap number (time * between the current time and the end of the downloaded buffer) is above * the padding described here (of the corresponding type), we won't * reschedule segments for that range. * * This is to avoid excessive re-buffering. * * Keeping the padding too low would increase the risk of re-bufferings. * * Keeping the padding too high would delay visible quality increase. * * @type {Object} */ BUFFER_PADDING: { audio: 1, // only "audio" segments video: 3, // only "video" segments other: 1 // tracks which are not audio/video (like text). }, /** * Segments of different types are downloaded by steps: