@zoom/probesdk
Version:
Zoom ProbeSDK tests the end user device and network capabilities, and the Zoom server connection.
802 lines (742 loc) • 32.2 kB
HTML
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>JSDoc: Source: prober.js</title>
<script src="scripts/prettify/prettify.js"></script>
<script src="scripts/prettify/lang-css.js"></script>
<!--[if lt IE 9]>
<script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
<link
type="text/css"
rel="stylesheet"
href="styles/prettify-tomorrow.css"
/>
<link type="text/css" rel="stylesheet" href="styles/jsdoc-default.css" />
</head>
<body>
<div id="main">
<h1 class="page-title">Source: prober.js</h1>
<section>
<article>
<pre
class="prettyprint source linenums"
><code>import NetworkAgent from "../basic/network/network_agent";
import RenderersProxy from "../basic/render/renderers_proxy";
import Feature from "../basic/features/feature";
import Hardware from "../basic/hardware/hardware";
import Browser from "../basic/software/browser";
import {
ERR_CODE,
RENDERER_TYPE,
DEF_PROBE_DURATION,
DEF_CONNECT_TIMEOUT,
NET_PROBING_DATA_TYPE,
} from "./prober_const";
import { checkType } from "../basic/utils/utils";
/**
* Prober provides the capabilities of requesting the media permissions and devices,
* diagnosing the devices and network, and reporting the diagnostic result, etc. It's easy
* to create an instance of Prober class and use it to implement your own probing requirements.
*
* @since 1.0.0
* @author clever.su@zoom.us
*
* @example
* import { Prober } from "@zoom/probesdk";
* const prober = new Prober();
* prober.diagnoseVideo(constraints, options);
*/
class Prober {
// the agent of network prober
#networkAgent = new NetworkAgent();
constructor() {}
/**
* Requests media device permission asynchronously.
*
* @async
* @function requestMediaDevicePermission
* @param {MediaStreamConstraints} constraints The constraints for the requested media stream.
* @returns {Promise<{stream: MediaStream, error: Error}>} A promise that resolves to an object containing either the requested media stream or an error object.
* The error object may be an instance of {@link https://developer.mozilla.org/en-US/docs/Web/API/DOMException|DOMException} or other standard system exceptions.
* @throws {Error} If the constraint argument is undefined.
*
* @example
* import { Prober } from "@zoom/probesdk";
* const prober = new Prober();
* function getMediaDevices() {
* prober.requestMediaDevices().then((result) => {
* console.log(`error:${result.error}, devices=${result.devices}`);
* });
* }
*/
async requestMediaDevicePermission(constraints) {
if (constraints == undefined) {
throw new Error("Invalid argument");
}
let mediaPmsResult = {};
try {
mediaPmsResult.stream = await navigator.mediaDevices.getUserMedia(
constraints
);
} catch (e) {
mediaPmsResult.error = e;
}
return mediaPmsResult;
}
/**
* An object, indicates an error generated by ProberSDK.
*
* @typedef {object} PSDKError
* @property {number} code an error code defined by @see {@link ERR_CODE}.
* @property {string} message the error message.
*/
/**
* Requests the media devices asynchronously.
*
* This function checks if the `enumerateDevices` method is supported the browser's `navigator.mediaDevices` object.
* If supported, it retrieves a list of available media devices. If not, it returns an object of @see {@link PSDKError}
* indicating the lack of support.
*
* @async
* @function requestMediaDevices
* @returns {Promise<{devices: MediaDeviceInfo[], error: {code: number, message: string}}>} a promise that resolves to an object containing an array of available media devices or an error object.
* The error object may be an instance of {@link https://developer.mozilla.org/en-US/docs/Web/API/DOMException|DOMException} or other standard system exceptions.
* @example
* prober.requestMediaDevices().then((result) => {
* console.log(`error:${result.error}, devices=${result.devices}`);
* });
*/
async requestMediaDevices() {
let mdResult = {};
if (!navigator.mediaDevices?.enumerateDevices) {
mdResult.error = {
code: ERR_CODE.API_NOT_SUPPORTED,
message: "enumerateDevices not supported",
};
} else {
try {
const devices = await navigator.mediaDevices.enumerateDevices();
mdResult.devices = devices;
} catch (error) {
mdResult.error = error;
}
}
return mdResult;
}
/**
* An object represents the result of diagnostics.
*
* @typedef {object} DiagnosticResult
* @property {number} code an error code defined by @see {@link ERR_CODE}.
* @property {string} message an error message.
*/
/**
* Performs audio diagnostic by recording audio from an input device and playing it through an output device.
* Diagnose any audio input/output devices by passing the constraints of the selected devices.
* Adjust how long you record the audio by setting the {@link duration} parameter. Specify the mime type of how
* to record the audio by setting the {@link mimeType} parameter.
*
* @function diagnoseAudio
* @param {MediaStreamConstraints} inputConstraints The constraints for capturing audio from input devices.
* @param {object} outputConstraints The constraints for playing audio through output devices.
* @param {number} [duration=0] The duration of the recording in milliseconds. If 0, 5000 milliseconds is used as default value.
* @param {string|undefined} [mimeType=''] The MIME type of the recorded audio. Default is an empty string. If pass an empty string or undefined,
* the mime type 'audio/webm;codecs=opus' will be used as the default value.
* @returns {DiagnosticResult} An object indicating the result of the audio diagnostic.
* @throws {Error} If any parameters are invalid, a customized Error will be thrown.
* The standard exceptions, like {@link https://developer.mozilla.org/en-US/docs/Web/API/DOMException|DOMException}, will be thrown if are captured while recording and playing.
*
* @example
* const audioInputConstraint = {
* audio: { deviceId: 'default' },
* video: false,
* };
*
* const audioOutputConstraint = {
* audio: { deviceId: 'xxxxxxxxxxxxxxxx' },
* video: false,
* };
*
* try {
* const diagnoseResult = prober.diagnoseAudio(audioInputConstraint, audioOutputConstraint, 0, '');
* console.log(diagnoseResult);
* } catch (e) {
* console.error(e);
* }
*/
diagnoseAudio(
inputConstraints,
outputConstraints,
duration = 0,
mimeType = ""
) {
if (
inputConstraints == undefined ||
outputConstraints == undefined ||
duration < 0
) {
throw new Error(
`Invalid arguments. inputConstraint:${inputConstraints}, outputConstraint:${outputConstraints}, duration:${duration}`
);
}
let _duration = duration;
if (_duration == 0) {
_duration = 5000; // record 5 seconds as default
}
let _mimeType = mimeType;
if (_mimeType == "" || _mimeType == undefined) {
_mimeType = "audio/webm;codecs=opus";
let isMimeTypeSupported = Hardware.isMimeTypeSupported(_mimeType);
if (!isMimeTypeSupported) {
console.log(`diagnoseAudio() mimeType(${_mimeType}) is not supported.`);
_mimeType = "audio/mp4";
isMimeTypeSupported = Hardware.isMimeTypeSupported(_mimeType);
if (!isMimeTypeSupported) {
console.log(
`diagnoseAudio() mimeType(${_mimeType}) is not supported.`
);
_mimeType = "audio/mp3";
isMimeTypeSupported = Hardware.isMimeTypeSupported(_mimeType);
if (!isMimeTypeSupported) {
console.log(
`diagnoseAudio() mimeType(${_mimeType}) is not supported.`
);
throw new Error(`diagnoseAudio() no supported mimeType available!`);
}
}
}
}
navigator.mediaDevices
.getUserMedia(inputConstraints)
.then((stream) => {
const mediaRecorder = new MediaRecorder(stream, {
mimeType: _mimeType,
});
const audioOutputDeviceId = outputConstraints.audio.deviceId;
const audioContext = new AudioContext({ sinkId: audioOutputDeviceId });
let recordedBlobs = [];
mediaRecorder.ondataavailable = (e) => {
if (e.data.size > 0) {
recordedBlobs.push(e.data);
}
};
mediaRecorder.start();
setTimeout(() => {
mediaRecorder.stop();
}, duration);
mediaRecorder.addEventListener("stop", () => {
const blob = new Blob(recordedBlobs, {
type: _mimeType,
});
if (Browser.isSafari(navigator).matched) {
this.#playAudioWithAudioContext(audioContext, blob)
.then(() => {
console.log(`audio recording is playing on Safari.`);
})
.catch((e) => {
console.error(`error in playing on Safari. error: ${e}`);
});
} else {
const url = URL.createObjectURL(blob);
const audio = new Audio(url);
const source = audioContext.createMediaElementSource(audio);
source.connect(audioContext.destination);
audio.play();
}
});
})
.catch((e) => {
throw e; // external caller handles the errors
});
return {
code: ERR_CODE.OK,
message: "audio diagnostic is started!",
};
}
/**
* An object represents the configuration of network diagnostic.
*
* @typedef {object} NetworkDiagnosticConfig
* @property {number} connectTimeout the timeout of the connections established in a network diagnostic. If not set, the default value is {@link DEF_CONNECT_TIMEOUT}.
* @property {number} probeDuration the duration of how long a round of a network diagnostic. If not set, the default value is {@link DEF_PROBE_DURATION}. If set, the maximum value between {@link DEF_PROBE_DURATION} and {@link probeDuration} is used as the final probing duration.
* @property {string} domain the domain of the prober server. Provide your own domain or use the default domain provided by Zoom if not set.
*/
/**
* An object describes how to render video streams.
*
* @typedef {object} RendererOptions
* @property {number} rendererType the renderer type, refer to the values of {@link RENDERER_TYPE}.
* @property {HTMLMediaElement|HTMLCanvasElement|OffscreenCanvas} target where to render a video stream.
* @property {MediaStream} stream a stream that contains a specified list of tracks of video. It's optional but necessary when stopping to diagnose a video device.
*/
/**
* Diagnose video device like camera and show the result on a renderable object.
* You can select a camera by setting {@link constraints} and different renderer type by setting the parameter {@link options}.
* Four renderer types are supported by ProbeSDK. Please refer to the documentation of @see {@link RENDERER_TYPE}.
*
* Once the video diagnostic is launched, a {@link MediaStream} object is created and appended to the {@link RendererOptions} as an extended field `stream`.
* It is optional but necessary when you want to stop the video diagnostic, if not stopping it, the camera capturing currently is always working.
*
* Different renderer types require different {@link RendererOptions}:
* - VIDEO_TAG requires a video element({@link HTMLMediaElement})
* - WebGL/WebGL2/WebGPU requires a canvas({@link HTMLCanvasElement}) or an OffscreenCanvas({@link OffscreenCanvas})
*
* @async
* @function diagnoseVideo
* @param {MediaStreamConstraints} constraints A camera constraints which is diagnosed.
* @param {RendererOptions} options The options of how to render a video stream, includes the a renderer type and a target where to render.
* @returns {DiagnosticResult} Indicates the result of the video diagnostic.
* @throws {Error} If any parameters are invalid, a customized Error will be thrown, or some standard exceptions like {@link DOMException} will be thrown during the diagnostic
*
* @example
* // for video tag case, you can:
* // in your html file, to define a video tag
* <video id="local_preview_video" width="640" height="480" style="background: gainsboro;" autoplay hidden></video>
*
* const preview_video = document.getElementById('local_preview_video');
* const constraints = {
* video: {
* width: preview_video.width,
* height: preview_video.height,
* deviceId: document.getElementById("camera_list").value, // need a deviceId of a camera
* },
* };
*
* const options = {
* rendererType: 1, // 1 for video_tag
* target: preview_video,
* };
*
* const diagnosticResult = await prober.diagnoseVideo(constraints, options);
* console.log(diagnosticResult);
*
* // for WebGL/WebGL2/WebGPU case, you can:
* // in your html file, to define a canvas
* <canvas id="local_preview_canvas" width="640" height="480"></canvas></br>
*
* const preview_canvas = document.getElementById('local_preview_canvas');
* const constraints = {
* video: {
* width: preview_canvas.width,
* height: preview_canvas.height,
* deviceId: document.getElementById("camera_list").value, // need a deviceId of a camera
* },
* };
*
* const options = {
* rendererType: 2, // WebGL
* target: preview_canvas,
* };
*
* const diagnosticResult = await prober.diagnoseVideo(constraints, options);
* console.log(diagnosticResult);
*/
async diagnoseVideo(constraints, options) {
if (constraints == undefined || options == undefined) {
throw new Error(
`Invalid arguments. constraints:${constraints}, options:${options}`
);
}
const diagnosticResult = {
code: ERR_CODE.OK,
message: "video diagnostic is started!",
};
const isRendererTypeSupported = await this.#isRendererTypeSupported(
options.rendererType
);
if (!isRendererTypeSupported) {
diagnosticResult.code = ERR_CODE.API_NOT_SUPPORTED;
diagnosticResult.message = `Not Supported renderer type. (arg)options.rendererType:${options.rendererType}`;
return diagnosticResult;
}
if (options.rendererType === RENDERER_TYPE.VIDEO_TAG) {
const isTypeCheckPass = this.#checkArgTypes(options.target, [
HTMLMediaElement,
]);
if (!isTypeCheckPass) {
diagnosticResult.code = ERR_CODE.INVALID_ARGS;
diagnosticResult.message = `Invalid target type. (arg)options.target:${options.target}`;
return diagnosticResult;
}
} else if (
options.rendererType === RENDERER_TYPE.WEBGL ||
options.rendererType === RENDERER_TYPE.WEBGL_2 ||
options.rendererType === RENDERER_TYPE.WEBGPU
) {
const areTypesCheckPass = this.#checkArgTypes(options.target, [
HTMLCanvasElement,
OffscreenCanvas,
]);
if (!areTypesCheckPass) {
diagnosticResult.code = ERR_CODE.INVALID_ARGS;
diagnosticResult.message = `Invalid target type. (arg)options.target:${options.target}`;
return diagnosticResult;
}
} else {
diagnosticResult.code = ERR_CODE.INVALID_ARGS;
diagnosticResult.message = `Invalid renderer type. (arg)type:${options.rendererType}`;
return diagnosticResult;
}
const rendersProxy = RenderersProxy.getInstance();
navigator.mediaDevices
.getUserMedia(constraints)
.then(async (stream) => {
if (options.rendererType == RENDERER_TYPE.VIDEO_TAG) {
// render stream to a video element
options.target.srcObject = stream;
options.stream = stream;
} else if (
options.rendererType == RENDERER_TYPE.WEBGL ||
options.rendererType == RENDERER_TYPE.WEBGL_2 ||
options.rendererType == RENDERER_TYPE.WEBGPU
) {
// create a video element as the source seeding to a canvas for rendering
options.stream = stream;
const video = document.createElement("video");
video.width = options.target.width;
video.height = options.target.height;
video.loop = true;
video.autoplay = true;
video.muted = true;
video.srcObject = stream;
await video.play();
// use canvas as the viewport
const viewport = {
x: 0,
y: 0,
w: options.target.width,
h: options.target.height,
};
rendersProxy.preview(
options.rendererType,
video,
options.target,
viewport
);
}
})
.catch((e) => {
throw e;
});
return diagnosticResult;
}
/**
* Stops the video diagnostic that was started by the {@link diagnoseVideo} method.
*
* Once the video diagnostic is launched, an object of {@link RendererOptions} is passed to the function {@link diagnoseVideo}.
* A {@link MediaStream} object is set to the object of {@link RendererOptions} that is used to stop the video diagnostic here.
* Each {@link MediaStreamTrack} will be stopped and removed from the stream.
*
* The frontend or any caller should pass the same object of {@link RendererOptions} and do extra work after the video diagnostic is stopped,
* like removing the video element or the canvas on the screen.
*
* @function stopToDiagnoseVideo
* @param {RendererOptions} options The options of how to render a video stream, includes the a renderer type and a target where to render.
* @returns {boolean} Returns true if the video diagnostic is stopped successfully, otherwise returns false.
*
* @example
* document.getElementById("btn_stop_preview").addEventListener("click", () =>{
* let result = prober.stopToDiagnoseVideo(diagnoseVideoOptions);
* diagnoseVideoOptions = null;
* console.log(`stopToDiagnoseVideo() result: ${result}`);
* });
*/
stopToDiagnoseVideo(options) {
if (!options) {
console.error(
`stopToDiagnoseVideo() options is null! Cannot stop to diagnose video.`
);
return false;
}
const rendersProxy = RenderersProxy.getInstance();
return rendersProxy.stopPreview(options);
}
/**
* An object describes the statistics of a network diagnostic.
*
* @typedef {object} NetworkDiagnosticStatsData
* @property {number} bandwidth bandwidth(kb/s).
* @property {number} bw_level the quality level of the bandwidth, refer to {@link BANDWIDTH_QUALITY_LEVEL}.
* @property {number} jitter jitter(ms).
* @property {number} lossRate the rate of package loss(%).
* @property {number} rtt the round-trip time(ms).
* @property {number} network_level the quality level of the network, refer to {@link NETWORK_QUALITY_LEVEL}.
*/
/**
* An object describes the report of the final and average statistics of a network diagnostic.
*
* @typedef {object} NetworkDiagnosticStatsReport
* @property {number} uplink_bandwidth the last uplink bandwidth, kb/s.
* @property {number} uplink_avg_loss the average value of uplink package loss(%).
* @property {number} uplink_avg_rtt the average value of uplink round-trip time(ms).
* @property {number} uplink_avg_jitter the average value of uplink jitter(ms).
* @property {number} uplink_bw_level the last uplink bandwidth quality level, refer to {@link BANDWIDTH_QUALITY_LEVEL}.
* @property {number} uplink_network_level the last uplink network quality level, refer to {@link NETWORK_QUALITY_LEVEL}.
* @property {number} downlink_bandwidth the last downlink bandwidth, kb/s.
* @property {number} downlink_avg_loss the average value of downlink package loss(%).
* @property {number} downlink_avg_rtt the average value of downlink round-trip time(ms).
* @property {number} downlink_avg_jitter the average value of downlink jitter(ms).
* @property {number} downlink_bw_level the last downlink bandwidth quality level, refer to {@link BANDWIDTH_QUALITY_LEVEL}.
* @property {number} downlink_network_level the last downlink network quality level, refer to {@link NETWORK_QUALITY_LEVEL}.
*/
/**
* An object describes the content/data part of the real-time network diagnostic statistics.
*
* @typedef {object} NetworkDiagnosticStatsContent
* @property {string} path indicates the statistics coming from uplink or downlink.
* @property {NetworkDiagnosticStatsData} statistics the statistics of uplink or downlink.
*/
/**
* An object describes the real-time network diagnostic statistics.
*
* @typedef {object} NetworkDiagnosticStats
* @property {number} type indicates whether the data is a real-time statistics or the final report. Refer to {@link NET_PROBING_DATA_TYPE} for details.
* @property {NetworkDiagnosticStatsContent} content indicates the content of the real-time statistics.
*/
/**
* A function object is used as a listener to listen the network diagnostic statistics.
*
* @typedef {object} NetworkStatsListener
* @property {function} onStatsReceived callback function which receives an instance of {@link NetworkDiagnosticStats}.
*/
/**
* An object represents the details of protocols which are used in the network diagnostics.
*
* @typedef {object} ProtocolEntry
* @property {number} type the type of the protocols, refer to {@link PROTOCOL_TYPE}.
* @property {boolean} isBlocked indicates whether the protocol is blocked or not.
* @property {string} port the port that a protocol uses.
* @property {string} tip a tip will help if the protocol or port is blocked or not.
* @property {*} error some customized errors or standard errors, like {@link https://developer.mozilla.org/en-US/docs/Web/API/DOMException|DOMException}, will be thrown if any. If no exceptions, it is undefined.
*/
/**
* An object describes the report of the network diagnostic.
*
* @typedef {object} NetworkDiagnosticResult
* @property {string} serviceZone indicates the service zone, it is a constant currently.
* @property {Array<ProtocolEntry>} protocols an array of protocols used in a network diagnostic.
* @property {NetworkDiagnosticStatsReport} statistics the final report of the network diagnostic statistics.
* @property {string} rid a string is used to track this round of network diagnosis.
*/
/**
* An object describes an entry of an affected feature.
*
* @typedef {object} AffectedFeatureEntry
* @property {string} featureName the name of an affected feature.
*/
/**
* An object describes an entry of basic information.
*
* @typedef {object} BasicInfoEntry
* @property {number} index index of an attribute added to the basic information, refer to {@link BASIC_INFO_ATTR_INDEX}.
* @property {string} attr name/label of an attribute.
* @property {string} val value of an attribute.
* @property {boolean} critical whether the attribute is critical or not. If true, the attribute is critical and a list of affected features will be attached to the affectedFeatures field.
* @property {Array<AffectedFeatureEntry>} affectedFeatures an array of affected features if the {@link critical} value is true, that is, a group of features might be affected if this attribute is not matched.
*/
/**
* An object describes an entry of a supported feature checking.
*
* @typedef {object} CheckItem
* @property {number} index indicates a classification of the requirement, sometimes this field can be ignored.
* @property {string} label the label of a requirement.
* @property {boolean} matched indicates whether a condition of the requirement is matched or not.
* @property {string} tip a tip will help if the condition is not {@link matched}.
*/
/**
* An object describes a piece of supported feature.
*
* @typedef {object} FeatureEntry
* @property {number} index the index of a supported feature, refer to {@link SUPPORTED_FEATURE_INDEX}.
* @property {string} featureName the name of a supported feature.
* @property {boolean} isSupported whether the feature is supported or not.
* @property {Array<CheckItem>} checkList an array of {@link CheckItem} which are used to judge whether the conditions of a supported features are matched or not.
*/
/**
* An object describes a report of the entire diagnostic.
*
* @typedef {object} DiagnosticReport
* @property {NetworkDiagnosticResult} networkDiagnosticResult the report of the network diagnostic part.
* @property {Array<BasicInfoEntry>} basicInfo a set of basic information, like browser, OS, hardware, etc.
* @property {Array<FeatureEntry>} supportedFeatures a set of features that are important to the user.
*/
/**
* Start a full diagnostic that includes the network diagnostic, basic information, and supported features report .
* It depends on the network diagnostic. Once it is called, the network diagnostic begins, and a report will be generated automatically after it ends.
*
* Before the diagnosis starts, you need to specify the detection time, connection timeout time and other parameters.
* For the detection time, we recommend setting it at more than 2 minutes. This is because in network diagnosis, if a server is not connected,
* we will try to connect to an alternative server, which will lead to a connection timeout wait.
* It takes up most of the detection time, resulting in less detection time after the connection, resulting in inaccurate data.
* Therefore, we recommend that the detection time be set relatively large, more than 2 minutes, such as 3-5 minutes is OK.
* If you set the time too short, the results of the network diagnosis will not be very accurate and will not help you diagnose the problem.
*
* @function startToDiagnose
* @param {string} [jsUrl=prober.js] a URL of javascript file used for a network diagnostic.
* @param {string} [wasmUrl=prober.wasm] a URL of WebAssembly file used for a network diagnostic.
* @param {NetworkDiagnosticConfig} config indicates the configuration of a network diagnostic.
* @param {NetworkStatsListener|undefined} [networkStatsListener=undefined] the listener to receive network diagnostic statistics, the listener can be set to undefined if you only care about the final network diagnostic.
* @returns {Promise<DiagnosticReport>} a diagnostic report in a promise which includes a set of basic information, supported features, and a final network diagnostic report.
*
* @example
* const jsUrl = 'prober.js';
* const wasmUrl = 'prober.wasm';
* const config = { probeDuration: 120 * 1000, connectTimeout: 20 * 1000, domain: 'zoomdev.us' };
* prober.startToDiagnose(jsUrl, wasmUrl, config, (stats) => {
* console.log(stats);
* }).then((report) => {
* console.log(report);
* });
*/
startToDiagnose(
jsUrl = "prober.js",
wasmUrl = "prober.wasm",
config,
networkStatsListener = undefined
) {
if (!jsUrl) jsUrl = "prober.js";
if (!wasmUrl) wasmUrl = "prober.wasm";
return new Promise((resolve) => {
const proberObserverProxy = {
onStatsObserver: function (stats) {
if (networkStatsListener != undefined) {
networkStatsListener(stats);
}
},
onReportObserver: function (report) {
resolve(report);
},
};
this.#networkAgent.diagnose(jsUrl, wasmUrl, config, proberObserverProxy);
});
}
/**
* Query the trackingId(rid) of last round of probing.
*
* @function queryRid
* @return {string} rid a string is used to track the last round of network diagnosis. If the result is an empty string or undefined, it means that the last round of network diagnosis fails.
*
* @example
* const rid = prober.queryRid();
* console.log(rid);
*/
queryRid() {
return this.#networkAgent.queryRid();
}
#checkArgTypes(arg, types) {
let hasOneTypePassCheck = false;
for (let i = 0; i < types.length; i++) {
const type = types[i];
if (checkType(arg, type)) {
hasOneTypePassCheck = true;
}
}
return hasOneTypePassCheck;
}
async #isRendererTypeSupported(rendererType) {
return new Promise(async (resolve) => {
if (rendererType === RENDERER_TYPE.VIDEO_TAG) {
resolve(true);
} else if (rendererType === RENDERER_TYPE.WEBGL) {
const isWebGLSupported = Feature.isWebGLSupported();
resolve(isWebGLSupported);
} else if (rendererType === RENDERER_TYPE.WEBGL_2) {
const isWebGL2Supported = Feature.isWebGL2Supported();
resolve(isWebGL2Supported);
} else if (rendererType === RENDERER_TYPE.WEBGPU) {
const isWebGPUSupported = await Feature.isWebGPUSupported();
resolve(isWebGPUSupported);
} else {
resolve(false);
}
});
}
#playAudioWithAudioContext(audioContext, blob) {
return this.#blobToArrayBuffer(blob)
.then((arrayBuffer) => audioContext.decodeAudioData(arrayBuffer))
.then((audioBuffer) => {
const source = audioContext.createBufferSource();
source.buffer = audioBuffer;
source.connect(audioContext.destination);
source.start(0);
});
}
#blobToArrayBuffer(blob) {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onloadend = () => resolve(reader.result);
reader.onerror = reject;
reader.readAsArrayBuffer(blob);
});
}
}
export default Prober;
</code></pre>
</article>
</section>
</div>
<nav>
<h2><a href="index.html">Probe SDK Reference</a></h2>
<h3>Classes</h3>
<ul>
<li><a href="Prober.html">Prober</a></li>
</ul>
<h3>Global</h3>
<ul>
<li>
<a href="global.html#BANDWIDTH_QUALITY_LEVEL"
>BANDWIDTH_QUALITY_LEVEL</a
>
</li>
<li>
<a href="global.html#BASIC_INFO_ATTR_INDEX">BASIC_INFO_ATTR_INDEX</a>
</li>
<li>
<a href="global.html#DEF_CONNECT_TIMEOUT">DEF_CONNECT_TIMEOUT</a>
</li>
<li><a href="global.html#DEF_PROBE_DURATION">DEF_PROBE_DURATION</a></li>
<li><a href="global.html#ERR_CODE">ERR_CODE</a></li>
<li>
<a href="global.html#NETWORK_QUALITY_LEVEL">NETWORK_QUALITY_LEVEL</a>
</li>
<li>
<a href="global.html#NET_PROBING_DATA_TYPE">NET_PROBING_DATA_TYPE</a>
</li>
<li><a href="global.html#PROTOCOL_TYPE">PROTOCOL_TYPE</a></li>
<li><a href="global.html#RENDERER_TYPE">RENDERER_TYPE</a></li>
<li>
<a href="global.html#SUPPORTED_FEATURE_INDEX"
>SUPPORTED_FEATURE_INDEX</a
>
</li>
<li><a href="global.html#diagnoseAudio">diagnoseAudio</a></li>
<li><a href="global.html#diagnoseVideo">diagnoseVideo</a></li>
<li><a href="global.html#queryRid">queryRid</a></li>
<li>
<a href="global.html#requestMediaDevicePermission"
>requestMediaDevicePermission</a
>
</li>
<li>
<a href="global.html#requestMediaDevices">requestMediaDevices</a>
</li>
<li><a href="global.html#startToDiagnose">startToDiagnose</a></li>
<li>
<a href="global.html#stopToDiagnoseVideo">stopToDiagnoseVideo</a>
</li>
</ul>
</nav>
<br class="clear" />
<footer>
Documentation generated by
<a href="https://github.com/jsdoc/jsdoc">JSDoc 4.0.2</a> on Wed Aug 07
2024 12:50:56 GMT+0800 (China Standard Time)
</footer>
<script>
prettyPrint();
</script>
<script src="scripts/linenumber.js"></script>
</body>
</html>