woolball-client
Version:
Client-side library for Woolball enabling secure browser resource sharing for distributed AI task processing
1 line • 11.9 MB
TypeScript
declare const workerCode = "\"use strict\";\n(() => {\n var __create = Object.create;\n var __defProp = Object.defineProperty;\n var __getOwnPropDesc = Object.getOwnPropertyDescriptor;\n var __getOwnPropNames = Object.getOwnPropertyNames;\n var __getProtoOf = Object.getPrototypeOf;\n var __hasOwnProp = Object.prototype.hasOwnProperty;\n var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;\n var __require = /* @__PURE__ */ ((x3) => typeof require !== \"undefined\" ? require : typeof Proxy !== \"undefined\" ? new Proxy(x3, {\n get: (a2, b4) => (typeof require !== \"undefined\" ? require : a2)[b4]\n }) : x3)(function(x3) {\n if (typeof require !== \"undefined\") return require.apply(this, arguments);\n throw Error('Dynamic require of \"' + x3 + '\" is not supported');\n });\n var __esm = (fn2, res) => function __init() {\n return fn2 && (res = (0, fn2[__getOwnPropNames(fn2)[0]])(fn2 = 0)), res;\n };\n var __commonJS = (cb, mod) => function __require2() {\n return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;\n };\n var __export = (target, all) => {\n for (var name in all)\n __defProp(target, name, { get: all[name], enumerable: true });\n };\n var __copyProps = (to2, from, except, desc) => {\n if (from && typeof from === \"object\" || typeof from === \"function\") {\n for (let key of __getOwnPropNames(from))\n if (!__hasOwnProp.call(to2, key) && key !== except)\n __defProp(to2, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });\n }\n return to2;\n };\n var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(\n // If the importer is in node compatibility mode or this is not an ESM\n // file that has been converted to a CommonJS file using a Babel-\n // compatible transform (i.e. 
\"__esModule\" has not been set), then set\n // \"default\" to the CommonJS \"module.exports\" for node compatibility.\n isNodeMode || !mod || !mod.__esModule ? __defProp(target, \"default\", { value: mod, enumerable: true }) : target,\n mod\n ));\n var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== \"symbol\" ? key + \"\" : key, value);\n\n // node_modules/onnxruntime-common/dist/esm/backend-impl.js\n var backends, backendsSortedByPriority, registerBackend, tryResolveAndInitializeBackend, resolveBackendAndExecutionProviders;\n var init_backend_impl = __esm({\n \"node_modules/onnxruntime-common/dist/esm/backend-impl.js\"() {\n backends = /* @__PURE__ */ new Map();\n backendsSortedByPriority = [];\n registerBackend = (name, backend, priority) => {\n if (backend && typeof backend.init === \"function\" && typeof backend.createInferenceSessionHandler === \"function\") {\n const currentBackend = backends.get(name);\n if (currentBackend === void 0) {\n backends.set(name, { backend, priority });\n } else if (currentBackend.priority > priority) {\n return;\n } else if (currentBackend.priority === priority) {\n if (currentBackend.backend !== backend) {\n throw new Error(`cannot register backend \"${name}\" using priority ${priority}`);\n }\n }\n if (priority >= 0) {\n const i4 = backendsSortedByPriority.indexOf(name);\n if (i4 !== -1) {\n backendsSortedByPriority.splice(i4, 1);\n }\n for (let i5 = 0; i5 < backendsSortedByPriority.length; i5++) {\n if (backends.get(backendsSortedByPriority[i5]).priority <= priority) {\n backendsSortedByPriority.splice(i5, 0, name);\n return;\n }\n }\n backendsSortedByPriority.push(name);\n }\n return;\n }\n throw new TypeError(\"not a valid backend\");\n };\n tryResolveAndInitializeBackend = async (backendName) => {\n const backendInfo = backends.get(backendName);\n if (!backendInfo) {\n return \"backend not found.\";\n }\n if (backendInfo.initialized) {\n return backendInfo.backend;\n } else if 
(backendInfo.aborted) {\n return backendInfo.error;\n } else {\n const isInitializing = !!backendInfo.initPromise;\n try {\n if (!isInitializing) {\n backendInfo.initPromise = backendInfo.backend.init(backendName);\n }\n await backendInfo.initPromise;\n backendInfo.initialized = true;\n return backendInfo.backend;\n } catch (e3) {\n if (!isInitializing) {\n backendInfo.error = `${e3}`;\n backendInfo.aborted = true;\n }\n return backendInfo.error;\n } finally {\n delete backendInfo.initPromise;\n }\n }\n };\n resolveBackendAndExecutionProviders = async (options) => {\n const eps = options.executionProviders || [];\n const backendHints = eps.map((i4) => typeof i4 === \"string\" ? i4 : i4.name);\n const backendNames = backendHints.length === 0 ? backendsSortedByPriority : backendHints;\n let backend;\n const errors = [];\n const availableBackendNames = /* @__PURE__ */ new Set();\n for (const backendName of backendNames) {\n const resolveResult = await tryResolveAndInitializeBackend(backendName);\n if (typeof resolveResult === \"string\") {\n errors.push({ name: backendName, err: resolveResult });\n } else {\n if (!backend) {\n backend = resolveResult;\n }\n if (backend === resolveResult) {\n availableBackendNames.add(backendName);\n }\n }\n }\n if (!backend) {\n throw new Error(`no available backend found. ERR: ${errors.map((e3) => `[${e3.name}] ${e3.err}`).join(\", \")}`);\n }\n for (const { name, err } of errors) {\n if (backendHints.includes(name)) {\n console.warn(`removing requested execution provider \"${name}\" from session options because it is not available: ${err}`);\n }\n }\n const filteredEps = eps.filter((i4) => availableBackendNames.has(typeof i4 === \"string\" ? 
i4 : i4.name));\n return [\n backend,\n new Proxy(options, {\n get: (target, prop) => {\n if (prop === \"executionProviders\") {\n return filteredEps;\n }\n return Reflect.get(target, prop);\n }\n })\n ];\n };\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/backend.js\n var init_backend = __esm({\n \"node_modules/onnxruntime-common/dist/esm/backend.js\"() {\n init_backend_impl();\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/version.js\n var version;\n var init_version = __esm({\n \"node_modules/onnxruntime-common/dist/esm/version.js\"() {\n version = \"1.21.0\";\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/env-impl.js\n var logLevelValue, env;\n var init_env_impl = __esm({\n \"node_modules/onnxruntime-common/dist/esm/env-impl.js\"() {\n init_version();\n logLevelValue = \"warning\";\n env = {\n wasm: {},\n webgl: {},\n webgpu: {},\n versions: { common: version },\n set logLevel(value) {\n if (value === void 0) {\n return;\n }\n if (typeof value !== \"string\" || [\"verbose\", \"info\", \"warning\", \"error\", \"fatal\"].indexOf(value) === -1) {\n throw new Error(`Unsupported logging level: ${value}`);\n }\n logLevelValue = value;\n },\n get logLevel() {\n return logLevelValue;\n }\n };\n Object.defineProperty(env, \"logLevel\", { enumerable: true });\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/env.js\n var env2;\n var init_env = __esm({\n \"node_modules/onnxruntime-common/dist/esm/env.js\"() {\n init_env_impl();\n env2 = env;\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/tensor-conversion-impl.js\n var tensorToDataURL, tensorToImageData;\n var init_tensor_conversion_impl = __esm({\n \"node_modules/onnxruntime-common/dist/esm/tensor-conversion-impl.js\"() {\n tensorToDataURL = (tensor, options) => {\n const canvas = typeof document !== \"undefined\" ? 
document.createElement(\"canvas\") : new OffscreenCanvas(1, 1);\n canvas.width = tensor.dims[3];\n canvas.height = tensor.dims[2];\n const pixels2DContext = canvas.getContext(\"2d\");\n if (pixels2DContext != null) {\n let width;\n let height;\n if (options?.tensorLayout !== void 0 && options.tensorLayout === \"NHWC\") {\n width = tensor.dims[2];\n height = tensor.dims[3];\n } else {\n width = tensor.dims[3];\n height = tensor.dims[2];\n }\n const inputformat = options?.format !== void 0 ? options.format : \"RGB\";\n const norm = options?.norm;\n let normMean;\n let normBias;\n if (norm === void 0 || norm.mean === void 0) {\n normMean = [255, 255, 255, 255];\n } else {\n if (typeof norm.mean === \"number\") {\n normMean = [norm.mean, norm.mean, norm.mean, norm.mean];\n } else {\n normMean = [norm.mean[0], norm.mean[1], norm.mean[2], 0];\n if (norm.mean[3] !== void 0) {\n normMean[3] = norm.mean[3];\n }\n }\n }\n if (norm === void 0 || norm.bias === void 0) {\n normBias = [0, 0, 0, 0];\n } else {\n if (typeof norm.bias === \"number\") {\n normBias = [norm.bias, norm.bias, norm.bias, norm.bias];\n } else {\n normBias = [norm.bias[0], norm.bias[1], norm.bias[2], 0];\n if (norm.bias[3] !== void 0) {\n normBias[3] = norm.bias[3];\n }\n }\n }\n const stride = height * width;\n let rTensorPointer = 0, gTensorPointer = stride, bTensorPointer = stride * 2, aTensorPointer = -1;\n if (inputformat === \"RGBA\") {\n rTensorPointer = 0;\n gTensorPointer = stride;\n bTensorPointer = stride * 2;\n aTensorPointer = stride * 3;\n } else if (inputformat === \"RGB\") {\n rTensorPointer = 0;\n gTensorPointer = stride;\n bTensorPointer = stride * 2;\n } else if (inputformat === \"RBG\") {\n rTensorPointer = 0;\n bTensorPointer = stride;\n gTensorPointer = stride * 2;\n }\n for (let i4 = 0; i4 < height; i4++) {\n for (let j3 = 0; j3 < width; j3++) {\n const R3 = (tensor.data[rTensorPointer++] - normBias[0]) * normMean[0];\n const G4 = (tensor.data[gTensorPointer++] - normBias[1]) * 
normMean[1];\n const B3 = (tensor.data[bTensorPointer++] - normBias[2]) * normMean[2];\n const A4 = aTensorPointer === -1 ? 255 : (tensor.data[aTensorPointer++] - normBias[3]) * normMean[3];\n pixels2DContext.fillStyle = \"rgba(\" + R3 + \",\" + G4 + \",\" + B3 + \",\" + A4 + \")\";\n pixels2DContext.fillRect(j3, i4, 1, 1);\n }\n }\n if (\"toDataURL\" in canvas) {\n return canvas.toDataURL();\n } else {\n throw new Error(\"toDataURL is not supported\");\n }\n } else {\n throw new Error(\"Can not access image data\");\n }\n };\n tensorToImageData = (tensor, options) => {\n const pixels2DContext = typeof document !== \"undefined\" ? document.createElement(\"canvas\").getContext(\"2d\") : new OffscreenCanvas(1, 1).getContext(\"2d\");\n let image;\n if (pixels2DContext != null) {\n let width;\n let height;\n let channels;\n if (options?.tensorLayout !== void 0 && options.tensorLayout === \"NHWC\") {\n width = tensor.dims[2];\n height = tensor.dims[1];\n channels = tensor.dims[3];\n } else {\n width = tensor.dims[3];\n height = tensor.dims[2];\n channels = tensor.dims[1];\n }\n const inputformat = options !== void 0 ? options.format !== void 0 ? 
options.format : \"RGB\" : \"RGB\";\n const norm = options?.norm;\n let normMean;\n let normBias;\n if (norm === void 0 || norm.mean === void 0) {\n normMean = [255, 255, 255, 255];\n } else {\n if (typeof norm.mean === \"number\") {\n normMean = [norm.mean, norm.mean, norm.mean, norm.mean];\n } else {\n normMean = [norm.mean[0], norm.mean[1], norm.mean[2], 255];\n if (norm.mean[3] !== void 0) {\n normMean[3] = norm.mean[3];\n }\n }\n }\n if (norm === void 0 || norm.bias === void 0) {\n normBias = [0, 0, 0, 0];\n } else {\n if (typeof norm.bias === \"number\") {\n normBias = [norm.bias, norm.bias, norm.bias, norm.bias];\n } else {\n normBias = [norm.bias[0], norm.bias[1], norm.bias[2], 0];\n if (norm.bias[3] !== void 0) {\n normBias[3] = norm.bias[3];\n }\n }\n }\n const stride = height * width;\n if (options !== void 0) {\n if (options.format !== void 0 && channels === 4 && options.format !== \"RGBA\" || channels === 3 && options.format !== \"RGB\" && options.format !== \"BGR\") {\n throw new Error(\"Tensor format doesn't match input tensor dims\");\n }\n }\n const step = 4;\n let rImagePointer = 0, gImagePointer = 1, bImagePointer = 2, aImagePointer = 3;\n let rTensorPointer = 0, gTensorPointer = stride, bTensorPointer = stride * 2, aTensorPointer = -1;\n if (inputformat === \"RGBA\") {\n rTensorPointer = 0;\n gTensorPointer = stride;\n bTensorPointer = stride * 2;\n aTensorPointer = stride * 3;\n } else if (inputformat === \"RGB\") {\n rTensorPointer = 0;\n gTensorPointer = stride;\n bTensorPointer = stride * 2;\n } else if (inputformat === \"RBG\") {\n rTensorPointer = 0;\n bTensorPointer = stride;\n gTensorPointer = stride * 2;\n }\n image = pixels2DContext.createImageData(width, height);\n for (let i4 = 0; i4 < height * width; rImagePointer += step, gImagePointer += step, bImagePointer += step, aImagePointer += step, i4++) {\n image.data[rImagePointer] = (tensor.data[rTensorPointer++] - normBias[0]) * normMean[0];\n image.data[gImagePointer] = 
(tensor.data[gTensorPointer++] - normBias[1]) * normMean[1];\n image.data[bImagePointer] = (tensor.data[bTensorPointer++] - normBias[2]) * normMean[2];\n image.data[aImagePointer] = aTensorPointer === -1 ? 255 : (tensor.data[aTensorPointer++] - normBias[3]) * normMean[3];\n }\n } else {\n throw new Error(\"Can not access image data\");\n }\n return image;\n };\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/tensor-factory-impl.js\n var bufferToTensor, tensorFromImage, tensorFromTexture, tensorFromGpuBuffer, tensorFromMLTensor, tensorFromPinnedBuffer;\n var init_tensor_factory_impl = __esm({\n \"node_modules/onnxruntime-common/dist/esm/tensor-factory-impl.js\"() {\n init_tensor_impl();\n bufferToTensor = (buffer, options) => {\n if (buffer === void 0) {\n throw new Error(\"Image buffer must be defined\");\n }\n if (options.height === void 0 || options.width === void 0) {\n throw new Error(\"Image height and width must be defined\");\n }\n if (options.tensorLayout === \"NHWC\") {\n throw new Error(\"NHWC Tensor layout is not supported yet\");\n }\n const { height, width } = options;\n const norm = options.norm ?? { mean: 255, bias: 0 };\n let normMean;\n let normBias;\n if (typeof norm.mean === \"number\") {\n normMean = [norm.mean, norm.mean, norm.mean, norm.mean];\n } else {\n normMean = [norm.mean[0], norm.mean[1], norm.mean[2], norm.mean[3] ?? 255];\n }\n if (typeof norm.bias === \"number\") {\n normBias = [norm.bias, norm.bias, norm.bias, norm.bias];\n } else {\n normBias = [norm.bias[0], norm.bias[1], norm.bias[2], norm.bias[3] ?? 0];\n }\n const inputformat = options.format !== void 0 ? options.format : \"RGBA\";\n const outputformat = options.tensorFormat !== void 0 ? options.tensorFormat !== void 0 ? options.tensorFormat : \"RGB\" : \"RGB\";\n const stride = height * width;\n const float32Data = outputformat === \"RGBA\" ? 
new Float32Array(stride * 4) : new Float32Array(stride * 3);\n let step = 4, rImagePointer = 0, gImagePointer = 1, bImagePointer = 2, aImagePointer = 3;\n let rTensorPointer = 0, gTensorPointer = stride, bTensorPointer = stride * 2, aTensorPointer = -1;\n if (inputformat === \"RGB\") {\n step = 3;\n rImagePointer = 0;\n gImagePointer = 1;\n bImagePointer = 2;\n aImagePointer = -1;\n }\n if (outputformat === \"RGBA\") {\n aTensorPointer = stride * 3;\n } else if (outputformat === \"RBG\") {\n rTensorPointer = 0;\n bTensorPointer = stride;\n gTensorPointer = stride * 2;\n } else if (outputformat === \"BGR\") {\n bTensorPointer = 0;\n gTensorPointer = stride;\n rTensorPointer = stride * 2;\n }\n for (let i4 = 0; i4 < stride; i4++, rImagePointer += step, bImagePointer += step, gImagePointer += step, aImagePointer += step) {\n float32Data[rTensorPointer++] = (buffer[rImagePointer] + normBias[0]) / normMean[0];\n float32Data[gTensorPointer++] = (buffer[gImagePointer] + normBias[1]) / normMean[1];\n float32Data[bTensorPointer++] = (buffer[bImagePointer] + normBias[2]) / normMean[2];\n if (aTensorPointer !== -1 && aImagePointer !== -1) {\n float32Data[aTensorPointer++] = (buffer[aImagePointer] + normBias[3]) / normMean[3];\n }\n }\n const outputTensor = outputformat === \"RGBA\" ? new Tensor(\"float32\", float32Data, [1, 4, height, width]) : new Tensor(\"float32\", float32Data, [1, 3, height, width]);\n return outputTensor;\n };\n tensorFromImage = async (image, options) => {\n const isHTMLImageEle = typeof HTMLImageElement !== \"undefined\" && image instanceof HTMLImageElement;\n const isImageDataEle = typeof ImageData !== \"undefined\" && image instanceof ImageData;\n const isImageBitmap = typeof ImageBitmap !== \"undefined\" && image instanceof ImageBitmap;\n const isString = typeof image === \"string\";\n let data;\n let bufferToTensorOptions = options ?? 
{};\n const createCanvas = () => {\n if (typeof document !== \"undefined\") {\n return document.createElement(\"canvas\");\n } else if (typeof OffscreenCanvas !== \"undefined\") {\n return new OffscreenCanvas(1, 1);\n } else {\n throw new Error(\"Canvas is not supported\");\n }\n };\n const createCanvasContext = (canvas) => {\n if (typeof HTMLCanvasElement !== \"undefined\" && canvas instanceof HTMLCanvasElement) {\n return canvas.getContext(\"2d\");\n } else if (canvas instanceof OffscreenCanvas) {\n return canvas.getContext(\"2d\");\n } else {\n return null;\n }\n };\n if (isHTMLImageEle) {\n const canvas = createCanvas();\n canvas.width = image.width;\n canvas.height = image.height;\n const pixels2DContext = createCanvasContext(canvas);\n if (pixels2DContext != null) {\n let height = image.height;\n let width = image.width;\n if (options !== void 0 && options.resizedHeight !== void 0 && options.resizedWidth !== void 0) {\n height = options.resizedHeight;\n width = options.resizedWidth;\n }\n if (options !== void 0) {\n bufferToTensorOptions = options;\n if (options.tensorFormat !== void 0) {\n throw new Error(\"Image input config format must be RGBA for HTMLImageElement\");\n } else {\n bufferToTensorOptions.tensorFormat = \"RGBA\";\n }\n bufferToTensorOptions.height = height;\n bufferToTensorOptions.width = width;\n } else {\n bufferToTensorOptions.tensorFormat = \"RGBA\";\n bufferToTensorOptions.height = height;\n bufferToTensorOptions.width = width;\n }\n pixels2DContext.drawImage(image, 0, 0);\n data = pixels2DContext.getImageData(0, 0, width, height).data;\n } else {\n throw new Error(\"Can not access image data\");\n }\n } else if (isImageDataEle) {\n let height;\n let width;\n if (options !== void 0 && options.resizedWidth !== void 0 && options.resizedHeight !== void 0) {\n height = options.resizedHeight;\n width = options.resizedWidth;\n } else {\n height = image.height;\n width = image.width;\n }\n if (options !== void 0) {\n bufferToTensorOptions = 
options;\n }\n bufferToTensorOptions.format = \"RGBA\";\n bufferToTensorOptions.height = height;\n bufferToTensorOptions.width = width;\n if (options !== void 0) {\n const tempCanvas = createCanvas();\n tempCanvas.width = width;\n tempCanvas.height = height;\n const pixels2DContext = createCanvasContext(tempCanvas);\n if (pixels2DContext != null) {\n pixels2DContext.putImageData(image, 0, 0);\n data = pixels2DContext.getImageData(0, 0, width, height).data;\n } else {\n throw new Error(\"Can not access image data\");\n }\n } else {\n data = image.data;\n }\n } else if (isImageBitmap) {\n if (options === void 0) {\n throw new Error(\"Please provide image config with format for Imagebitmap\");\n }\n const canvas = createCanvas();\n canvas.width = image.width;\n canvas.height = image.height;\n const pixels2DContext = createCanvasContext(canvas);\n if (pixels2DContext != null) {\n const height = image.height;\n const width = image.width;\n pixels2DContext.drawImage(image, 0, 0, width, height);\n data = pixels2DContext.getImageData(0, 0, width, height).data;\n bufferToTensorOptions.height = height;\n bufferToTensorOptions.width = width;\n return bufferToTensor(data, bufferToTensorOptions);\n } else {\n throw new Error(\"Can not access image data\");\n }\n } else if (isString) {\n return new Promise((resolve, reject) => {\n const canvas = createCanvas();\n const context = createCanvasContext(canvas);\n if (!image || !context) {\n return reject();\n }\n const newImage = new Image();\n newImage.crossOrigin = \"Anonymous\";\n newImage.src = image;\n newImage.onload = () => {\n canvas.width = newImage.width;\n canvas.height = newImage.height;\n context.drawImage(newImage, 0, 0, canvas.width, canvas.height);\n const img = context.getImageData(0, 0, canvas.width, canvas.height);\n bufferToTensorOptions.height = canvas.height;\n bufferToTensorOptions.width = canvas.width;\n resolve(bufferToTensor(img.data, bufferToTensorOptions));\n };\n });\n } else {\n throw new Error(\"Input 
data provided is not supported - aborted tensor creation\");\n }\n if (data !== void 0) {\n return bufferToTensor(data, bufferToTensorOptions);\n } else {\n throw new Error(\"Input data provided is not supported - aborted tensor creation\");\n }\n };\n tensorFromTexture = (texture, options) => {\n const { width, height, download, dispose } = options;\n const dims = [1, height, width, 4];\n return new Tensor({ location: \"texture\", type: \"float32\", texture, dims, download, dispose });\n };\n tensorFromGpuBuffer = (gpuBuffer, options) => {\n const { dataType, dims, download, dispose } = options;\n return new Tensor({ location: \"gpu-buffer\", type: dataType ?? \"float32\", gpuBuffer, dims, download, dispose });\n };\n tensorFromMLTensor = (mlTensor, options) => {\n const { dataType, dims, download, dispose } = options;\n return new Tensor({ location: \"ml-tensor\", type: dataType ?? \"float32\", mlTensor, dims, download, dispose });\n };\n tensorFromPinnedBuffer = (type, buffer, dims) => new Tensor({ location: \"cpu-pinned\", type, data: buffer, dims: dims ?? 
[buffer.length] });\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/tensor-impl-type-mapping.js\n var NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP, NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP, isTypedArrayChecked, checkTypedArray;\n var init_tensor_impl_type_mapping = __esm({\n \"node_modules/onnxruntime-common/dist/esm/tensor-impl-type-mapping.js\"() {\n NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP = /* @__PURE__ */ new Map([\n [\"float32\", Float32Array],\n [\"uint8\", Uint8Array],\n [\"int8\", Int8Array],\n [\"uint16\", Uint16Array],\n [\"int16\", Int16Array],\n [\"int32\", Int32Array],\n [\"bool\", Uint8Array],\n [\"float64\", Float64Array],\n [\"uint32\", Uint32Array],\n [\"int4\", Uint8Array],\n [\"uint4\", Uint8Array]\n ]);\n NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP = /* @__PURE__ */ new Map([\n [Float32Array, \"float32\"],\n [Uint8Array, \"uint8\"],\n [Int8Array, \"int8\"],\n [Uint16Array, \"uint16\"],\n [Int16Array, \"int16\"],\n [Int32Array, \"int32\"],\n [Float64Array, \"float64\"],\n [Uint32Array, \"uint32\"]\n ]);\n isTypedArrayChecked = false;\n checkTypedArray = () => {\n if (!isTypedArrayChecked) {\n isTypedArrayChecked = true;\n const isBigInt64ArrayAvailable = typeof BigInt64Array !== \"undefined\" && BigInt64Array.from;\n const isBigUint64ArrayAvailable = typeof BigUint64Array !== \"undefined\" && BigUint64Array.from;\n const Float16Array2 = globalThis.Float16Array;\n const isFloat16ArrayAvailable = typeof Float16Array2 !== \"undefined\" && Float16Array2.from;\n if (isBigInt64ArrayAvailable) {\n NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set(\"int64\", BigInt64Array);\n NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigInt64Array, \"int64\");\n }\n if (isBigUint64ArrayAvailable) {\n NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set(\"uint64\", BigUint64Array);\n NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigUint64Array, \"uint64\");\n }\n if (isFloat16ArrayAvailable) {\n NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set(\"float16\", Float16Array2);\n 
NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(Float16Array2, \"float16\");\n } else {\n NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set(\"float16\", Uint16Array);\n }\n }\n };\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/tensor-utils-impl.js\n var calculateSize, tensorReshape;\n var init_tensor_utils_impl = __esm({\n \"node_modules/onnxruntime-common/dist/esm/tensor-utils-impl.js\"() {\n init_tensor_impl();\n calculateSize = (dims) => {\n let size = 1;\n for (let i4 = 0; i4 < dims.length; i4++) {\n const dim = dims[i4];\n if (typeof dim !== \"number\" || !Number.isSafeInteger(dim)) {\n throw new TypeError(`dims[${i4}] must be an integer, got: ${dim}`);\n }\n if (dim < 0) {\n throw new RangeError(`dims[${i4}] must be a non-negative integer, got: ${dim}`);\n }\n size *= dim;\n }\n return size;\n };\n tensorReshape = (tensor, dims) => {\n switch (tensor.location) {\n case \"cpu\":\n return new Tensor(tensor.type, tensor.data, dims);\n case \"cpu-pinned\":\n return new Tensor({\n location: \"cpu-pinned\",\n data: tensor.data,\n type: tensor.type,\n dims\n });\n case \"texture\":\n return new Tensor({\n location: \"texture\",\n texture: tensor.texture,\n type: tensor.type,\n dims\n });\n case \"gpu-buffer\":\n return new Tensor({\n location: \"gpu-buffer\",\n gpuBuffer: tensor.gpuBuffer,\n type: tensor.type,\n dims\n });\n case \"ml-tensor\":\n return new Tensor({\n location: \"ml-tensor\",\n mlTensor: tensor.mlTensor,\n type: tensor.type,\n dims\n });\n default:\n throw new Error(`tensorReshape: tensor location ${tensor.location} is not supported`);\n }\n };\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/tensor-impl.js\n var Tensor;\n var init_tensor_impl = __esm({\n \"node_modules/onnxruntime-common/dist/esm/tensor-impl.js\"() {\n init_tensor_conversion_impl();\n init_tensor_factory_impl();\n init_tensor_impl_type_mapping();\n init_tensor_utils_impl();\n Tensor = class {\n /**\n * implementation.\n */\n constructor(arg0, arg1, arg2) {\n 
checkTypedArray();\n let type;\n let dims;\n if (typeof arg0 === \"object\" && \"location\" in arg0) {\n this.dataLocation = arg0.location;\n type = arg0.type;\n dims = arg0.dims;\n switch (arg0.location) {\n case \"cpu-pinned\": {\n const expectedTypedArrayConstructor = NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(type);\n if (!expectedTypedArrayConstructor) {\n throw new TypeError(`unsupported type \"${type}\" to create tensor from pinned buffer`);\n }\n if (!(arg0.data instanceof expectedTypedArrayConstructor)) {\n throw new TypeError(`buffer should be of type ${expectedTypedArrayConstructor.name}`);\n }\n this.cpuData = arg0.data;\n break;\n }\n case \"texture\": {\n if (type !== \"float32\") {\n throw new TypeError(`unsupported type \"${type}\" to create tensor from texture`);\n }\n this.gpuTextureData = arg0.texture;\n this.downloader = arg0.download;\n this.disposer = arg0.dispose;\n break;\n }\n case \"gpu-buffer\": {\n if (type !== \"float32\" && type !== \"float16\" && type !== \"int32\" && type !== \"int64\" && type !== \"uint32\" && type !== \"uint8\" && type !== \"bool\" && type !== \"uint4\" && type !== \"int4\") {\n throw new TypeError(`unsupported type \"${type}\" to create tensor from gpu buffer`);\n }\n this.gpuBufferData = arg0.gpuBuffer;\n this.downloader = arg0.download;\n this.disposer = arg0.dispose;\n break;\n }\n case \"ml-tensor\": {\n if (type !== \"float32\" && type !== \"float16\" && type !== \"int32\" && type !== \"int64\" && type !== \"uint32\" && type !== \"uint64\" && type !== \"int8\" && type !== \"uint8\" && type !== \"bool\" && type !== \"uint4\" && type !== \"int4\") {\n throw new TypeError(`unsupported type \"${type}\" to create tensor from MLTensor`);\n }\n this.mlTensorData = arg0.mlTensor;\n this.downloader = arg0.download;\n this.disposer = arg0.dispose;\n break;\n }\n default:\n throw new Error(`Tensor constructor: unsupported location '${this.dataLocation}'`);\n }\n } else {\n let data;\n let maybeDims;\n if (typeof arg0 === 
\"string\") {\n type = arg0;\n maybeDims = arg2;\n if (arg0 === \"string\") {\n if (!Array.isArray(arg1)) {\n throw new TypeError(\"A string tensor's data must be a string array.\");\n }\n data = arg1;\n } else {\n const typedArrayConstructor = NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(arg0);\n if (typedArrayConstructor === void 0) {\n throw new TypeError(`Unsupported tensor type: ${arg0}.`);\n }\n if (Array.isArray(arg1)) {\n if (arg0 === \"float16\" && typedArrayConstructor === Uint16Array || arg0 === \"uint4\" || arg0 === \"int4\") {\n throw new TypeError(`Creating a ${arg0} tensor from number array is not supported. Please use ${typedArrayConstructor.name} as data.`);\n } else if (arg0 === \"uint64\" || arg0 === \"int64\") {\n data = typedArrayConstructor.from(arg1, BigInt);\n } else {\n data = typedArrayConstructor.from(arg1);\n }\n } else if (arg1 instanceof typedArrayConstructor) {\n data = arg1;\n } else if (arg1 instanceof Uint8ClampedArray) {\n if (arg0 === \"uint8\") {\n data = Uint8Array.from(arg1);\n } else {\n throw new TypeError(`A Uint8ClampedArray tensor's data must be type of uint8`);\n }\n } else if (arg0 === \"float16\" && arg1 instanceof Uint16Array && typedArrayConstructor !== Uint16Array) {\n data = new globalThis.Float16Array(arg1.buffer, arg1.byteOffset, arg1.length);\n } else {\n throw new TypeError(`A ${type} tensor's data must be type of ${typedArrayConstructor}`);\n }\n }\n } else {\n maybeDims = arg1;\n if (Array.isArray(arg0)) {\n if (arg0.length === 0) {\n throw new TypeError(\"Tensor type cannot be inferred from an empty array.\");\n }\n const firstElementType = typeof arg0[0];\n if (firstElementType === \"string\") {\n type = \"string\";\n data = arg0;\n } else if (firstElementType === \"boolean\") {\n type = \"bool\";\n data = Uint8Array.from(arg0);\n } else {\n throw new TypeError(`Invalid element type of data array: ${firstElementType}.`);\n }\n } else if (arg0 instanceof Uint8ClampedArray) {\n type = \"uint8\";\n data = 
Uint8Array.from(arg0);\n } else {\n const mappedType = NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.get(arg0.constructor);\n if (mappedType === void 0) {\n throw new TypeError(`Unsupported type for tensor data: ${arg0.constructor}.`);\n }\n type = mappedType;\n data = arg0;\n }\n }\n if (maybeDims === void 0) {\n maybeDims = [data.length];\n } else if (!Array.isArray(maybeDims)) {\n throw new TypeError(\"A tensor's dims must be a number array\");\n }\n dims = maybeDims;\n this.cpuData = data;\n this.dataLocation = \"cpu\";\n }\n const size = calculateSize(dims);\n if (this.cpuData && size !== this.cpuData.length) {\n if ((type === \"uint4\" || type === \"int4\") && Math.ceil(size / 2) === this.cpuData.length) {\n } else {\n throw new Error(`Tensor's size(${size}) does not match data length(${this.cpuData.length}).`);\n }\n }\n this.type = type;\n this.dims = dims;\n this.size = size;\n }\n // #endregion\n // #region factory\n static async fromImage(image, options) {\n return tensorFromImage(image, options);\n }\n static fromTexture(texture, options) {\n return tensorFromTexture(texture, options);\n }\n static fromGpuBuffer(gpuBuffer, options) {\n return tensorFromGpuBuffer(gpuBuffer, options);\n }\n static fromMLTensor(mlTensor, options) {\n return tensorFromMLTensor(mlTensor, options);\n }\n static fromPinnedBuffer(type, buffer, dims) {\n return tensorFromPinnedBuffer(type, buffer, dims);\n }\n // #endregion\n // #region conversions\n toDataURL(options) {\n return tensorToDataURL(this, options);\n }\n toImageData(options) {\n return tensorToImageData(this, options);\n }\n // #endregion\n // #region properties\n get data() {\n this.ensureValid();\n if (!this.cpuData) {\n throw new Error(\"The data is not on CPU. 
Use `getData()` to download GPU data to CPU, or use `texture` or `gpuBuffer` property to access the GPU data directly.\");\n }\n return this.cpuData;\n }\n get location() {\n return this.dataLocation;\n }\n get texture() {\n this.ensureValid();\n if (!this.gpuTextureData) {\n throw new Error(\"The data is not stored as a WebGL texture.\");\n }\n return this.gpuTextureData;\n }\n get gpuBuffer() {\n this.ensureValid();\n if (!this.gpuBufferData) {\n throw new Error(\"The data is not stored as a WebGPU buffer.\");\n }\n return this.gpuBufferData;\n }\n get mlTensor() {\n this.ensureValid();\n if (!this.mlTensorData) {\n throw new Error(\"The data is not stored as a WebNN MLTensor.\");\n }\n return this.mlTensorData;\n }\n // #endregion\n // #region methods\n async getData(releaseData) {\n this.ensureValid();\n switch (this.dataLocation) {\n case \"cpu\":\n case \"cpu-pinned\":\n return this.data;\n case \"texture\":\n case \"gpu-buffer\":\n case \"ml-tensor\": {\n if (!this.downloader) {\n throw new Error(\"The current tensor is not created with a specified data downloader.\");\n }\n if (this.isDownloading) {\n throw new Error(\"The current tensor is being downloaded.\");\n }\n try {\n this.isDownloading = true;\n const data = await this.downloader();\n this.downloader = void 0;\n this.dataLocation = \"cpu\";\n this.cpuData = data;\n if (releaseData && this.disposer) {\n this.disposer();\n this.disposer = void 0;\n }\n return data;\n } finally {\n this.isDownloading = false;\n }\n }\n default:\n throw new Error(`cannot get data from location: ${this.dataLocation}`);\n }\n }\n dispose() {\n if (this.isDownloading) {\n throw new Error(\"The current tensor is being downloaded.\");\n }\n if (this.disposer) {\n this.disposer();\n this.disposer = void 0;\n }\n this.cpuData = void 0;\n this.gpuTextureData = void 0;\n this.gpuBufferData = void 0;\n this.mlTensorData = void 0;\n this.downloader = void 0;\n this.isDownloading = void 0;\n this.dataLocation = \"none\";\n }\n // 
#endregion\n // #region tensor utilities\n ensureValid() {\n if (this.dataLocation === \"none\") {\n throw new Error(\"The tensor is disposed.\");\n }\n }\n reshape(dims) {\n this.ensureValid();\n if (this.downloader || this.disposer) {\n throw new Error(\"Cannot reshape a tensor that owns GPU resource.\");\n }\n return tensorReshape(this, dims);\n }\n };\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/tensor.js\n var Tensor2;\n var init_tensor = __esm({\n \"node_modules/onnxruntime-common/dist/esm/tensor.js\"() {\n init_tensor_impl();\n Tensor2 = Tensor;\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/trace.js\n var TRACE, TRACE_FUNC, TRACE_FUNC_BEGIN, TRACE_FUNC_END;\n var init_trace = __esm({\n \"node_modules/onnxruntime-common/dist/esm/trace.js\"() {\n init_env_impl();\n TRACE = (deviceType, label) => {\n if (typeof env.trace === \"undefined\" ? !env.wasm.trace : !env.trace) {\n return;\n }\n console.timeStamp(`${deviceType}::ORT::${label}`);\n };\n TRACE_FUNC = (msg, extraMsg) => {\n const stack = new Error().stack?.split(/\\r\\n|\\r|\\n/g) || [];\n let hasTraceFunc = false;\n for (let i4 = 0; i4 < stack.length; i4++) {\n if (hasTraceFunc && !stack[i4].includes(\"TRACE_FUNC\")) {\n let label = `FUNC_${msg}::${stack[i4].trim().split(\" \")[1]}`;\n if (extraMsg) {\n label += `::${extraMsg}`;\n }\n TRACE(\"CPU\", label);\n return;\n }\n if (stack[i4].includes(\"TRACE_FUNC\")) {\n hasTraceFunc = true;\n }\n }\n };\n TRACE_FUNC_BEGIN = (extraMsg) => {\n if (typeof env.trace === \"undefined\" ? !env.wasm.trace : !env.trace) {\n return;\n }\n TRACE_FUNC(\"BEGIN\", extraMsg);\n };\n TRACE_FUNC_END = (extraMsg) => {\n if (typeof env.trace === \"undefined\" ? 
!env.wasm.trace : !env.trace) {\n return;\n }\n TRACE_FUNC(\"END\", extraMsg);\n };\n }\n });\n\n // node_modules/onnxruntime-common/dist/esm/inference-session-impl.js\n var InferenceSession;\n var init_inference_session_impl = __esm({\n \"node_modules/onnxruntime-common/dist/esm/inference-session-impl.js\"() {\n init_backend_impl();\n init_tensor();\n init_trace();\n InferenceSession = class _InferenceSession {\n constructor(handler) {\n this.handler = handler;\n }\n async run(feeds, arg1, arg2) {\n TRACE_FUNC_BEGIN();\n const fetches = {};\n let options = {};\n if (typeof feeds !== \"object\" || feeds === null || feeds instanceof Tensor2 || Array.isArray(feeds)) {\n throw new TypeError(\"'feeds' must be an object that use input names as keys and OnnxValue as corresponding values.\");\n }\n let isFetchesEmpty = true;\n if (typeof arg1 === \"object\") {\n if (arg1 === null) {\n throw new TypeError(\"Unexpected argument[1]: cannot be null.\");\n }\n if (arg1 instanceof Tensor2) {\n throw new TypeError(\"'fetches' cannot be a Tensor\");\n }\n if (Array.isArray(arg1)) {\n if (arg1.length === 0) {\n throw new TypeError(\"'fetches' cannot be an empty array.\");\n }\n isFetchesEmpty = false;\n for (const name of arg1) {\n if (typeof name !== \"string\") {\n throw new TypeError(\"'fetches' must be a string array or an object.\");\n }\n if (this.outputNames.indexOf(name) === -1) {\n throw new RangeError(`'fetches' contains invalid output name: ${name}.`);\n }\n fetches[name] = null;\n }\n if (typeof arg2 === \"object\" && arg2 !== null) {\n options = arg2;\n } else if (typeof arg2 !== \"undefined\") {\n throw new TypeError(\"'options' must be an object.\");\n }\n } else {\n let isFetches = false;\n const arg1Keys = Object.getOwnPropertyNames(arg1);\n for (const name of this.outputNames) {\n if (arg1Keys.indexOf(name) !== -1) {\n const v4 = arg1[name];\n if (v4 === null || v4 instanceof Tensor2) {\n isFetches = true;\n isFetchesEmpty = false;\n fetches[name] = v4;\n }\n }\n 
}\n if (isFetches) {\n if (typeof arg2 === \"object\" && arg2 !== null) {\n options = arg2;\n } else if (typeof arg2 !== \"undefined\") {\n throw new TypeError(\"'options' must be an object.\");\n }\n } else {\n options = arg1;\n }\n }\n } else if (typeof arg1 !== \"undefined\") {\n throw new TypeError(\"Unexpected argument[1]: must be 'fetches' or 'options'.\");\n }\n for (const name of this.inputNames) {\n if (typeof feeds[name] === \"undefined\") {\n throw new Error(`input '${name}' is missing in 'feeds'.`);\n }\n }\n if (isFetchesEmpty) {\n for (const name of this.outputNames) {\n fetches[name] = null;\n }\n }\n const results = await this.handler.run(feeds, fetches, options);\n const returnValue = {};\n for (const key in results) {\n if (Object.hasOwnProperty.call(results, key)) {\n const result = results[key];\n if (result instanceof Tensor2) {\n returnValue[key] = result;\n } else {\n returnValue[key] = new Tensor2(result.type, result.data, result.dims);\n }\n }\n }\n TRACE_FUNC_END();\n return returnValue;\n }\n async release() {\n return this.handler.dispose();\n }\n static async create(arg0, arg1, arg2, arg3) {\n TRACE_FUNC_BEGIN();\n let filePathOrUint8Array;\n let options = {};\n if (typeof arg0 === \"string\") {\n filePathOrUint8Array = arg0;\n if (typeof arg1 === \"object\" && arg1 !== null) {\n options = arg1;\n } else if (typeof arg1 !== \"undefined\") {\n throw new TypeError(\"'options' must be an object.\");\n }\n } else if (arg0 instanceof Uint8Array) {\n filePathOrUint8Array = arg0;\n if (typeof arg1 === \"object\" && arg1 !== null) {\n options = arg1;\n } else if (typeof arg1 !== \"undefined\") {\n throw new TypeError(\"'options' must be an object.\");\n }\n } else if (arg0 instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && arg0 instanceof SharedArrayBuffer) {\n const buffer = arg0;\n let byteOffset = 0;\n let byteLength = arg0.byteLength;\n if (typeof arg1 === \"object\" && arg1 !== null) {\n options = arg1;\n } else if 
(typeof arg1 === \"number\") {\n byteOffset = arg1;\n if (!Number.isSafeInteger(byteOffset)) {\n throw new RangeError(\"'byteOffset' must be an integer.\");\n }\n if (byteOffset < 0 || byteOffset >= buffer.byteLength) {\n throw new RangeError(`'byteOffset' is out of range [0, ${buffer.byteLength}).`);\n }\n byteLength = arg0.byteLength - byteOffset;\n if (typeof arg2 === \"number\") {\n byteLength = arg2;\n if (!Number.isSafeInteger(byteLength)) {\n th