@gltf-transform/extensions
Adds extension support to @gltf-transform/core
var core = require('@gltf-transform/core');
var ktxParse = require('ktx-parse');
const EXT_MESH_GPU_INSTANCING = 'EXT_mesh_gpu_instancing';
const EXT_MESHOPT_COMPRESSION = 'EXT_meshopt_compression';
const EXT_TEXTURE_WEBP = 'EXT_texture_webp';
const EXT_TEXTURE_AVIF = 'EXT_texture_avif';
const KHR_DRACO_MESH_COMPRESSION = 'KHR_draco_mesh_compression';
const KHR_LIGHTS_PUNCTUAL = 'KHR_lights_punctual';
const KHR_MATERIALS_ANISOTROPY = 'KHR_materials_anisotropy';
const KHR_MATERIALS_CLEARCOAT = 'KHR_materials_clearcoat';
const KHR_MATERIALS_DIFFUSE_TRANSMISSION = 'KHR_materials_diffuse_transmission';
const KHR_MATERIALS_DISPERSION = 'KHR_materials_dispersion';
const KHR_MATERIALS_EMISSIVE_STRENGTH = 'KHR_materials_emissive_strength';
const KHR_MATERIALS_IOR = 'KHR_materials_ior';
const KHR_MATERIALS_IRIDESCENCE = 'KHR_materials_iridescence';
const KHR_MATERIALS_PBR_SPECULAR_GLOSSINESS = 'KHR_materials_pbrSpecularGlossiness';
const KHR_MATERIALS_SHEEN = 'KHR_materials_sheen';
const KHR_MATERIALS_SPECULAR = 'KHR_materials_specular';
const KHR_MATERIALS_TRANSMISSION = 'KHR_materials_transmission';
const KHR_MATERIALS_UNLIT = 'KHR_materials_unlit';
const KHR_MATERIALS_VOLUME = 'KHR_materials_volume';
const KHR_MATERIALS_VARIANTS = 'KHR_materials_variants';
const KHR_MESH_QUANTIZATION = 'KHR_mesh_quantization';
const KHR_TEXTURE_BASISU = 'KHR_texture_basisu';
const KHR_TEXTURE_TRANSFORM = 'KHR_texture_transform';
const KHR_XMP_JSON_LD = 'KHR_xmp_json_ld';
// See BufferViewUsage in `writer-context.ts`.
const INSTANCE_ATTRIBUTE = 'INSTANCE_ATTRIBUTE';
/**
* Defines GPU instances of a {@link Mesh} under one {@link Node}. See {@link EXTMeshGPUInstancing}.
*/
class InstancedMesh extends core.ExtensionProperty {
init() {
this.extensionName = EXT_MESH_GPU_INSTANCING;
this.propertyType = 'InstancedMesh';
this.parentTypes = [core.PropertyType.NODE];
}
getDefaults() {
return Object.assign(super.getDefaults(), {
attributes: new core.RefMap()
});
}
/** Returns an instance attribute as an {@link Accessor}. */
getAttribute(semantic) {
return this.getRefMap('attributes', semantic);
}
/**
* Sets an instance attribute to an {@link Accessor}. All attributes must have the same
* instance count.
*/
setAttribute(semantic, accessor) {
return this.setRefMap('attributes', semantic, accessor, {
usage: INSTANCE_ATTRIBUTE
});
}
/**
* Lists all instance attribute {@link Accessor}s associated with the InstancedMesh. Order
* will be consistent with the order returned by {@link listSemantics}().
*/
listAttributes() {
return this.listRefMapValues('attributes');
}
/**
* Lists all instance attribute semantics associated with the InstancedMesh. Order will be
* consistent with the order returned by {@link listAttributes}().
*/
listSemantics() {
return this.listRefMapKeys('attributes');
}
}
InstancedMesh.EXTENSION_NAME = EXT_MESH_GPU_INSTANCING;
/**
* [`EXT_mesh_gpu_instancing`](https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Vendor/EXT_mesh_gpu_instancing/)
* prepares mesh data for efficient GPU instancing.
*
* GPU instancing allows engines to render many copies of a single mesh at once using a small number
* of draw calls. Instancing is particularly useful for things like trees, grass, road signs, etc.
* Keep in mind that predefined batches, as used in this extension, may prevent frustum culling
* within a batch. Dividing batches into collocated cells may be preferable to using a single large
* batch.
*
* > _**NOTICE:** While this extension stores mesh data optimized for GPU instancing, it
* > is important to note that (1) GPU instancing and other optimizations are possible — and
* > encouraged — even without this extension, and (2) other common meanings of the term
* > "instancing" exist, distinct from this extension. See
* > [Appendix: Motivation and Purpose](https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Vendor/EXT_mesh_gpu_instancing#appendix-motivation-and-purpose)
* > of the `EXT_mesh_gpu_instancing` specification._
*
* Properties:
* - {@link InstancedMesh}
*
* ### Example
*
* The `EXTMeshGPUInstancing` class provides a single {@link ExtensionProperty} type, `InstancedMesh`,
* which may be attached to any {@link Node} instance. For example:
*
* ```typescript
* import { Accessor } from '@gltf-transform/core';
* import { EXTMeshGPUInstancing } from '@gltf-transform/extensions';
*
* // Create standard mesh, node, and scene hierarchy.
* // ...
*
* // Assign positions for each instance.
* const batchPositions = document.createAccessor('instance_positions')
* .setArray(new Float32Array([
* 0, 0, 0,
* 1, 0, 0,
* 2, 0, 0,
* ]))
* .setType(Accessor.Type.VEC3)
* .setBuffer(buffer);
*
* // Assign IDs for each instance.
* const batchIDs = document.createAccessor('instance_ids')
* .setArray(new Uint8Array([0, 1, 2]))
* .setType(Accessor.Type.SCALAR)
* .setBuffer(buffer);
*
* // Create an Extension attached to the Document.
* const batchExtension = document.createExtension(EXTMeshGPUInstancing)
* .setRequired(true);
* const batch = batchExtension.createInstancedMesh()
* .setAttribute('TRANSLATION', batchPositions)
* .setAttribute('_ID', batchIDs);
*
* node
* .setMesh(mesh)
* .setExtension('EXT_mesh_gpu_instancing', batch);
* ```
*
* Standard instance attributes are `TRANSLATION`, `ROTATION`, and `SCALE`, and support the accessor
* types allowed by the extension specification. Custom instance attributes are allowed, and should
* be prefixed with an underscore (`_*`).
*/
class EXTMeshGPUInstancing extends core.Extension {
constructor(...args) {
super(...args);
this.extensionName = EXT_MESH_GPU_INSTANCING;
/** @hidden */
this.provideTypes = [core.PropertyType.NODE];
/** @hidden */
this.prewriteTypes = [core.PropertyType.ACCESSOR];
}
/** Creates a new InstancedMesh property for use on a {@link Node}. */
createInstancedMesh() {
return new InstancedMesh(this.document.getGraph());
}
/** @hidden */
read(context) {
const jsonDoc = context.jsonDoc;
const nodeDefs = jsonDoc.json.nodes || [];
nodeDefs.forEach((nodeDef, nodeIndex) => {
if (!nodeDef.extensions || !nodeDef.extensions[EXT_MESH_GPU_INSTANCING]) return;
const instancedMeshDef = nodeDef.extensions[EXT_MESH_GPU_INSTANCING];
const instancedMesh = this.createInstancedMesh();
for (const semantic in instancedMeshDef.attributes) {
instancedMesh.setAttribute(semantic, context.accessors[instancedMeshDef.attributes[semantic]]);
}
context.nodes[nodeIndex].setExtension(EXT_MESH_GPU_INSTANCING, instancedMesh);
});
return this;
}
/** @hidden */
prewrite(context) {
// Set usage for instance attribute accessors, so they are stored in separate buffer
// views grouped by parent reference.
context.accessorUsageGroupedByParent.add(INSTANCE_ATTRIBUTE);
for (const prop of this.properties) {
for (const attribute of prop.listAttributes()) {
context.addAccessorToUsageGroup(attribute, INSTANCE_ATTRIBUTE);
}
}
return this;
}
/** @hidden */
write(context) {
const jsonDoc = context.jsonDoc;
this.document.getRoot().listNodes().forEach(node => {
const instancedMesh = node.getExtension(EXT_MESH_GPU_INSTANCING);
if (instancedMesh) {
const nodeIndex = context.nodeIndexMap.get(node);
const nodeDef = jsonDoc.json.nodes[nodeIndex];
const instancedMeshDef = {
attributes: {}
};
instancedMesh.listSemantics().forEach(semantic => {
const attribute = instancedMesh.getAttribute(semantic);
instancedMeshDef.attributes[semantic] = context.accessorIndexMap.get(attribute);
});
nodeDef.extensions = nodeDef.extensions || {};
nodeDef.extensions[EXT_MESH_GPU_INSTANCING] = instancedMeshDef;
}
});
return this;
}
}
EXTMeshGPUInstancing.EXTENSION_NAME = EXT_MESH_GPU_INSTANCING;
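// Usage sketch (a minimal example, not part of this file; assumes a Node.js
// environment and a hypothetical local file 'instanced.glb'): the extension
// must be registered with an I/O class before reading or writing assets that
// use EXT_mesh_gpu_instancing.
//
//   const io = new core.NodeIO().registerExtensions([EXTMeshGPUInstancing]);
//   const document = await io.read('instanced.glb');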
var EncoderMethod$1;
(function (EncoderMethod) {
EncoderMethod["QUANTIZE"] = "quantize";
EncoderMethod["FILTER"] = "filter";
})(EncoderMethod$1 || (EncoderMethod$1 = {}));
var MeshoptMode;
(function (MeshoptMode) {
MeshoptMode["ATTRIBUTES"] = "ATTRIBUTES";
MeshoptMode["TRIANGLES"] = "TRIANGLES";
MeshoptMode["INDICES"] = "INDICES";
})(MeshoptMode || (MeshoptMode = {}));
var MeshoptFilter;
(function (MeshoptFilter) {
/** No filter — quantize only. */
MeshoptFilter["NONE"] = "NONE";
/** Four 8- or 16-bit normalized values. */
MeshoptFilter["OCTAHEDRAL"] = "OCTAHEDRAL";
/** Four 16-bit normalized values. */
MeshoptFilter["QUATERNION"] = "QUATERNION";
/** K single-precision floating point values. */
MeshoptFilter["EXPONENTIAL"] = "EXPONENTIAL";
})(MeshoptFilter || (MeshoptFilter = {}));
/**
* Returns true for a fallback buffer, else false.
*
* - All references to the fallback buffer must come from bufferViews that
* have an EXT_meshopt_compression extension specified.
* - No references to the fallback buffer may come from
* EXT_meshopt_compression extension JSON.
*/
function isFallbackBuffer(bufferDef) {
if (!bufferDef.extensions || !bufferDef.extensions[EXT_MESHOPT_COMPRESSION]) return false;
const fallbackDef = bufferDef.extensions[EXT_MESHOPT_COMPRESSION];
return !!fallbackDef.fallback;
}
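// Example of the JSON this predicate matches, as a minimal sketch of the
// fallback buffer written by EXTMeshoptCompression.write() below:
//
//   { "byteLength": 1024, "extensions": { "EXT_meshopt_compression": { "fallback": true } } }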
const {
BYTE,
SHORT,
FLOAT
} = core.Accessor.ComponentType;
const {
encodeNormalizedInt,
decodeNormalizedInt
} = core.MathUtils;
/** Pre-processes array with required filters or padding. */
function prepareAccessor(accessor, encoder, mode, filterOptions) {
const {
filter,
bits
} = filterOptions;
const result = {
array: accessor.getArray(),
byteStride: accessor.getElementSize() * accessor.getComponentSize(),
componentType: accessor.getComponentType(),
normalized: accessor.getNormalized()
};
if (mode !== MeshoptMode.ATTRIBUTES) return result;
if (filter !== MeshoptFilter.NONE) {
let array = accessor.getNormalized() ? decodeNormalizedIntArray(accessor) : new Float32Array(result.array);
switch (filter) {
case MeshoptFilter.EXPONENTIAL:
// → K single-precision floating point values.
result.byteStride = accessor.getElementSize() * 4;
result.componentType = FLOAT;
result.normalized = false;
result.array = encoder.encodeFilterExp(array, accessor.getCount(), result.byteStride, bits);
break;
case MeshoptFilter.OCTAHEDRAL:
// → four 8- or 16-bit normalized values.
result.byteStride = bits > 8 ? 8 : 4;
result.componentType = bits > 8 ? SHORT : BYTE;
result.normalized = true;
array = accessor.getElementSize() === 3 ? padNormals(array) : array;
result.array = encoder.encodeFilterOct(array, accessor.getCount(), result.byteStride, bits);
break;
case MeshoptFilter.QUATERNION:
// → four 16-bit normalized values.
result.byteStride = 8;
result.componentType = SHORT;
result.normalized = true;
result.array = encoder.encodeFilterQuat(array, accessor.getCount(), result.byteStride, bits);
break;
default:
throw new Error('Invalid filter.');
}
result.min = accessor.getMin([]);
result.max = accessor.getMax([]);
if (accessor.getNormalized()) {
result.min = result.min.map(v => decodeNormalizedInt(v, accessor.getComponentType()));
result.max = result.max.map(v => decodeNormalizedInt(v, accessor.getComponentType()));
}
if (result.normalized) {
result.min = result.min.map(v => encodeNormalizedInt(v, result.componentType));
result.max = result.max.map(v => encodeNormalizedInt(v, result.componentType));
}
} else if (result.byteStride % 4) {
result.array = padArrayElements(result.array, accessor.getElementSize());
result.byteStride = result.array.byteLength / accessor.getCount();
}
return result;
}
function decodeNormalizedIntArray(attribute) {
const componentType = attribute.getComponentType();
const srcArray = attribute.getArray();
const dstArray = new Float32Array(srcArray.length);
for (let i = 0; i < srcArray.length; i++) {
dstArray[i] = decodeNormalizedInt(srcArray[i], componentType);
}
return dstArray;
}
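// Example: for UNSIGNED_BYTE input, decodeNormalizedInt maps 0 → 0 and 255 → 1,
// so a normalized Uint8Array [0, 127, 255] decodes to roughly
// Float32Array [0, 0.498, 1].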
/** Pads array to 4 byte alignment, required for Meshopt ATTRIBUTE buffer views. */
function padArrayElements(srcArray, elementSize) {
const byteStride = core.BufferUtils.padNumber(srcArray.BYTES_PER_ELEMENT * elementSize);
const elementStride = byteStride / srcArray.BYTES_PER_ELEMENT;
const elementCount = srcArray.length / elementSize;
const dstArray = new srcArray.constructor(elementCount * elementStride);
for (let i = 0; i * elementSize < srcArray.length; i++) {
for (let j = 0; j < elementSize; j++) {
dstArray[i * elementStride + j] = srcArray[i * elementSize + j];
}
}
return dstArray;
}
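// Example: a Uint8Array attribute with elementSize 3 has byteStride 3, which
// padNumber rounds up to 4, so [r, g, b, r, g, b] becomes
// [r, g, b, 0, r, g, b, 0].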
/** Pad normals with a .w component for octahedral encoding. */
function padNormals(srcArray) {
const dstArray = new Float32Array(srcArray.length * 4 / 3);
for (let i = 0, il = srcArray.length / 3; i < il; i++) {
dstArray[i * 4] = srcArray[i * 3];
dstArray[i * 4 + 1] = srcArray[i * 3 + 1];
dstArray[i * 4 + 2] = srcArray[i * 3 + 2];
}
return dstArray;
}
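// Example: padNormals(new Float32Array([0, 0, 1])) returns Float32Array
// [0, 0, 1, 0], the four-component layout expected by encodeFilterOct.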
function getMeshoptMode(accessor, usage) {
if (usage === core.WriterContext.BufferViewUsage.ELEMENT_ARRAY_BUFFER) {
const isTriangles = accessor.listParents().some(parent => {
return parent instanceof core.Primitive && parent.getMode() === core.Primitive.Mode.TRIANGLES;
});
return isTriangles ? MeshoptMode.TRIANGLES : MeshoptMode.INDICES;
}
return MeshoptMode.ATTRIBUTES;
}
function getMeshoptFilter(accessor, doc) {
const refs = doc.getGraph().listParentEdges(accessor).filter(edge => !(edge.getParent() instanceof core.Root));
for (const ref of refs) {
const refName = ref.getName();
const refKey = ref.getAttributes().key || '';
const isDelta = ref.getParent().propertyType === core.PropertyType.PRIMITIVE_TARGET;
// Indices.
if (refName === 'indices') return {
filter: MeshoptFilter.NONE
};
// Attributes.
//
// NOTES:
// - Vertex attributes should be filtered IFF they are _not_ quantized in
// 'packages/cli/src/transforms/meshopt.ts'.
// - POSITION and TEXCOORD_0 could use exponential filtering, but this produces broken
// output in some cases (e.g. Matilda.glb), for unknown reasons. gltfpack uses manual
// quantization for these attributes.
// - NORMAL and TANGENT attributes use Octahedral filters, but deltas in morphs do not.
// - When specifying bit depth for vertex attributes, check the defaults in `quantize.ts`
// and overrides in `meshopt.ts`. Don't store deltas at higher precision than base.
if (refName === 'attributes') {
if (refKey === 'POSITION') return {
filter: MeshoptFilter.NONE
};
if (refKey === 'TEXCOORD_0') return {
filter: MeshoptFilter.NONE
};
if (refKey.startsWith('JOINTS_')) return {
filter: MeshoptFilter.NONE
};
if (refKey.startsWith('WEIGHTS_')) return {
filter: MeshoptFilter.NONE
};
if (refKey === 'NORMAL' || refKey === 'TANGENT') {
return isDelta ? {
filter: MeshoptFilter.NONE
} : {
filter: MeshoptFilter.OCTAHEDRAL,
bits: 8
};
}
}
// Animation.
if (refName === 'output') {
const targetPath = getTargetPath(accessor);
if (targetPath === 'rotation') return {
filter: MeshoptFilter.QUATERNION,
bits: 16
};
if (targetPath === 'translation') return {
filter: MeshoptFilter.EXPONENTIAL,
bits: 12
};
if (targetPath === 'scale') return {
filter: MeshoptFilter.EXPONENTIAL,
bits: 12
};
return {
filter: MeshoptFilter.NONE
};
}
// See: https://github.com/donmccurdy/glTF-Transform/issues/489
if (refName === 'input') return {
filter: MeshoptFilter.NONE
};
if (refName === 'inverseBindMatrices') return {
filter: MeshoptFilter.NONE
};
}
return {
filter: MeshoptFilter.NONE
};
}
function getTargetPath(accessor) {
for (const sampler of accessor.listParents()) {
if (!(sampler instanceof core.AnimationSampler)) continue;
for (const channel of sampler.listParents()) {
if (!(channel instanceof core.AnimationChannel)) continue;
return channel.getTargetPath();
}
}
return null;
}
const DEFAULT_ENCODER_OPTIONS$1 = {
method: EncoderMethod$1.QUANTIZE
};
/**
* [`EXT_meshopt_compression`](https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/EXT_meshopt_compression/)
* provides compression and fast decoding for geometry, morph targets, and animations.
*
* Meshopt compression (based on the [meshoptimizer](https://github.com/zeux/meshoptimizer)
* library) offers a lightweight decoder with very fast runtime decompression, and is
* appropriate for models of any size. Meshopt can reduce the transmission sizes of geometry,
* morph targets, animation, and other numeric data stored in buffer views. When textures are
* large, other complementary compression methods should be used as well.
*
* For the full benefits of meshopt compression, **apply gzip, brotli, or another lossless
* compression method** to the resulting .glb, .gltf, or .bin files. Meshopt specifically
* pre-optimizes assets for this purpose — without this secondary compression, the size
* reduction is considerably less.
*
* Be aware that decompression happens before uploading to the GPU. While Meshopt decoding is
* considerably faster than Draco decoding, neither compression method will improve runtime
* performance directly. To improve framerate, you'll need to simplify the geometry by reducing
* vertex count or draw calls — not just compress it. Finally, be aware that Meshopt compression is
* lossy: repeatedly compressing and decompressing a model in a pipeline will lose precision, so
* compression should generally be the last stage of an art workflow, and uncompressed original
* files should be kept.
*
* The meshoptimizer library ([github](https://github.com/zeux/meshoptimizer/tree/master/js),
* [npm](https://www.npmjs.com/package/meshoptimizer)) is a required dependency for reading or
* writing files, and must be provided by the application. Compression may alternatively be applied
* with the [gltfpack](https://github.com/zeux/meshoptimizer/tree/master/gltf) tool.
*
* ### Example — Read
*
* To read glTF files using Meshopt compression, ensure that the extension
* and a decoder are registered. Geometry and other data are decompressed
* while reading the file.
*
* ```typescript
* import { NodeIO } from '@gltf-transform/core';
* import { EXTMeshoptCompression } from '@gltf-transform/extensions';
* import { MeshoptDecoder } from 'meshoptimizer';
*
* await MeshoptDecoder.ready;
*
* const io = new NodeIO()
* .registerExtensions([EXTMeshoptCompression])
* .registerDependencies({ 'meshopt.decoder': MeshoptDecoder });
*
* // Read and decode.
* const document = await io.read('compressed.glb');
* ```
*
* ### Example — Write
*
* The simplest way to apply Meshopt compression is with the {@link meshopt}
* transform. The extension and an encoder must be registered.
*
* ```typescript
* import { NodeIO } from '@gltf-transform/core';
* import { EXTMeshoptCompression } from '@gltf-transform/extensions';
* import { meshopt } from '@gltf-transform/functions';
* import { MeshoptEncoder } from 'meshoptimizer';
*
* await MeshoptEncoder.ready;
*
* const io = new NodeIO()
* .registerExtensions([EXTMeshoptCompression])
* .registerDependencies({ 'meshopt.encoder': MeshoptEncoder });
*
* await document.transform(
* meshopt({encoder: MeshoptEncoder, level: 'medium'})
* );
*
* await io.write('compressed-medium.glb', document);
* ```
*
* ### Example — Advanced
*
* Internally, the {@link meshopt} transform reorders and quantizes vertex data
* to prepare for compression. If you prefer different pre-processing, the
* EXTMeshoptCompression extension can be added to the document manually:
*
* ```typescript
* import { reorder, quantize } from '@gltf-transform/functions';
* import { EXTMeshoptCompression } from '@gltf-transform/extensions';
* import { MeshoptEncoder } from 'meshoptimizer';
*
* await document.transform(
* reorder({encoder: MeshoptEncoder}),
* quantize()
* );
*
* document.createExtension(EXTMeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: EXTMeshoptCompression.EncoderMethod.QUANTIZE });
* ```
*
* In either case, compression is deferred until generating output with an I/O
* class.
*/
class EXTMeshoptCompression extends core.Extension {
constructor(...args) {
super(...args);
this.extensionName = EXT_MESHOPT_COMPRESSION;
/** @hidden */
this.prereadTypes = [core.PropertyType.BUFFER, core.PropertyType.PRIMITIVE];
/** @hidden */
this.prewriteTypes = [core.PropertyType.BUFFER, core.PropertyType.ACCESSOR];
/** @hidden */
this.readDependencies = ['meshopt.decoder'];
/** @hidden */
this.writeDependencies = ['meshopt.encoder'];
this._decoder = null;
this._decoderFallbackBufferMap = new Map();
this._encoder = null;
this._encoderOptions = DEFAULT_ENCODER_OPTIONS$1;
this._encoderFallbackBuffer = null;
this._encoderBufferViews = {};
this._encoderBufferViewData = {};
this._encoderBufferViewAccessors = {};
}
/** @hidden */
install(key, dependency) {
if (key === 'meshopt.decoder') {
this._decoder = dependency;
}
if (key === 'meshopt.encoder') {
this._encoder = dependency;
}
return this;
}
/**
* Configures Meshopt options for quality/compression tuning. The two methods rely on different
* pre-processing before compression, and should be compared on the basis of (a) quality/loss
* and (b) final asset size after _also_ applying a lossless compression such as gzip or brotli.
*
* - QUANTIZE: Default. Pre-process with {@link quantize quantize()} (lossy to specified
* precision) before applying lossless Meshopt compression. Offers a considerable compression
* ratio with or without further supercompression. Equivalent to `gltfpack -c`.
* - FILTER: Pre-process with lossy filters to improve compression, before applying lossless
* Meshopt compression. While output may initially be larger than with the QUANTIZE method,
* this method will benefit more from supercompression (e.g. gzip or brotli). Equivalent to
* `gltfpack -cc`.
*
* Output with the FILTER method will generally be smaller after supercompression (e.g. gzip or
* brotli) is applied, but may be larger than QUANTIZE output without it. Decoding is very fast
* with both methods.
*
* Example:
*
* ```ts
* import { EXTMeshoptCompression } from '@gltf-transform/extensions';
*
* doc.createExtension(EXTMeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({
* method: EXTMeshoptCompression.EncoderMethod.QUANTIZE
* });
* ```
*/
setEncoderOptions(options) {
this._encoderOptions = {
...DEFAULT_ENCODER_OPTIONS$1,
...options
};
return this;
}
/**********************************************************************************************
* Decoding.
*/
/** @internal Checks preconditions, decodes buffer views, and creates decoded primitives. */
preread(context, propertyType) {
if (!this._decoder) {
if (!this.isRequired()) return this;
throw new Error(`[${EXT_MESHOPT_COMPRESSION}] Please install extension dependency, "meshopt.decoder".`);
}
if (!this._decoder.supported) {
if (!this.isRequired()) return this;
throw new Error(`[${EXT_MESHOPT_COMPRESSION}]: Missing WASM support.`);
}
if (propertyType === core.PropertyType.BUFFER) {
this._prereadBuffers(context);
} else if (propertyType === core.PropertyType.PRIMITIVE) {
this._prereadPrimitives(context);
}
return this;
}
/** @internal Decode buffer views. */
_prereadBuffers(context) {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
viewDefs.forEach((viewDef, index) => {
if (!viewDef.extensions || !viewDef.extensions[EXT_MESHOPT_COMPRESSION]) return;
const meshoptDef = viewDef.extensions[EXT_MESHOPT_COMPRESSION];
const byteOffset = meshoptDef.byteOffset || 0;
const byteLength = meshoptDef.byteLength || 0;
const count = meshoptDef.count;
const stride = meshoptDef.byteStride;
const result = new Uint8Array(count * stride);
const bufferDef = jsonDoc.json.buffers[meshoptDef.buffer];
// TODO(cleanup): Should be encapsulated in writer-context.ts.
const resource = bufferDef.uri ? jsonDoc.resources[bufferDef.uri] : jsonDoc.resources[core.GLB_BUFFER];
const source = core.BufferUtils.toView(resource, byteOffset, byteLength);
this._decoder.decodeGltfBuffer(result, count, stride, source, meshoptDef.mode, meshoptDef.filter);
context.bufferViews[index] = result;
});
}
/**
* Mark fallback buffers and replacements.
*
* Note: Alignment with primitives is arbitrary; this just needs to happen
* after Buffers have been parsed.
* @internal
*/
_prereadPrimitives(context) {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
viewDefs.forEach(viewDef => {
if (!viewDef.extensions || !viewDef.extensions[EXT_MESHOPT_COMPRESSION]) return;
const meshoptDef = viewDef.extensions[EXT_MESHOPT_COMPRESSION];
const buffer = context.buffers[meshoptDef.buffer];
const fallbackBuffer = context.buffers[viewDef.buffer];
const fallbackBufferDef = jsonDoc.json.buffers[viewDef.buffer];
if (isFallbackBuffer(fallbackBufferDef)) {
this._decoderFallbackBufferMap.set(fallbackBuffer, buffer);
}
});
}
/** @hidden Removes fallback buffers, if the extension is required. */
read(_context) {
if (!this.isRequired()) return this;
// Replace fallback buffers.
for (const [fallbackBuffer, buffer] of this._decoderFallbackBufferMap) {
for (const parent of fallbackBuffer.listParents()) {
if (parent instanceof core.Accessor) {
parent.swap(fallbackBuffer, buffer);
}
}
fallbackBuffer.dispose();
}
return this;
}
/**********************************************************************************************
* Encoding.
*/
/** @internal Claims accessors that can be compressed and writes compressed buffer views. */
prewrite(context, propertyType) {
if (propertyType === core.PropertyType.ACCESSOR) {
this._prewriteAccessors(context);
} else if (propertyType === core.PropertyType.BUFFER) {
this._prewriteBuffers(context);
}
return this;
}
/** @internal Claims accessors that can be compressed. */
_prewriteAccessors(context) {
const json = context.jsonDoc.json;
const encoder = this._encoder;
const options = this._encoderOptions;
const graph = this.document.getGraph();
const fallbackBuffer = this.document.createBuffer(); // Disposed on write.
const fallbackBufferIndex = this.document.getRoot().listBuffers().indexOf(fallbackBuffer);
let nextID = 1;
const parentToID = new Map();
const getParentID = property => {
for (const parent of graph.listParents(property)) {
if (parent.propertyType === core.PropertyType.ROOT) continue;
let id = parentToID.get(parent);
if (id === undefined) parentToID.set(parent, id = nextID++);
return id;
}
return -1;
};
this._encoderFallbackBuffer = fallbackBuffer;
this._encoderBufferViews = {};
this._encoderBufferViewData = {};
this._encoderBufferViewAccessors = {};
for (const accessor of this.document.getRoot().listAccessors()) {
// See: https://github.com/donmccurdy/glTF-Transform/pull/323#issuecomment-898791251
// Example: https://skfb.ly/6qAD8
if (getTargetPath(accessor) === 'weights') continue;
// See: https://github.com/donmccurdy/glTF-Transform/issues/289
if (accessor.getSparse()) continue;
const usage = context.getAccessorUsage(accessor);
const parentID = context.accessorUsageGroupedByParent.has(usage) ? getParentID(accessor) : null;
const mode = getMeshoptMode(accessor, usage);
const filter = options.method === EncoderMethod$1.FILTER ? getMeshoptFilter(accessor, this.document) : {
filter: MeshoptFilter.NONE
};
const preparedAccessor = prepareAccessor(accessor, encoder, mode, filter);
const {
array,
byteStride
} = preparedAccessor;
const buffer = accessor.getBuffer();
if (!buffer) throw new Error(`${EXT_MESHOPT_COMPRESSION}: Missing buffer for accessor.`);
const bufferIndex = this.document.getRoot().listBuffers().indexOf(buffer);
// Buffer view grouping key.
const key = [usage, parentID, mode, filter.filter, byteStride, bufferIndex].join(':');
let bufferView = this._encoderBufferViews[key];
let bufferViewData = this._encoderBufferViewData[key];
let bufferViewAccessors = this._encoderBufferViewAccessors[key];
// Write new buffer view, if needed.
if (!bufferView || !bufferViewData) {
bufferViewAccessors = this._encoderBufferViewAccessors[key] = [];
bufferViewData = this._encoderBufferViewData[key] = [];
bufferView = this._encoderBufferViews[key] = {
buffer: fallbackBufferIndex,
target: core.WriterContext.USAGE_TO_TARGET[usage],
byteOffset: 0,
byteLength: 0,
byteStride: usage === core.WriterContext.BufferViewUsage.ARRAY_BUFFER ? byteStride : undefined,
extensions: {
[EXT_MESHOPT_COMPRESSION]: {
buffer: bufferIndex,
byteOffset: 0,
byteLength: 0,
mode: mode,
filter: filter.filter !== MeshoptFilter.NONE ? filter.filter : undefined,
byteStride: byteStride,
count: 0
}
}
};
}
// Write accessor.
const accessorDef = context.createAccessorDef(accessor);
accessorDef.componentType = preparedAccessor.componentType;
accessorDef.normalized = preparedAccessor.normalized;
accessorDef.byteOffset = bufferView.byteLength;
if (accessorDef.min && preparedAccessor.min) accessorDef.min = preparedAccessor.min;
if (accessorDef.max && preparedAccessor.max) accessorDef.max = preparedAccessor.max;
context.accessorIndexMap.set(accessor, json.accessors.length);
json.accessors.push(accessorDef);
bufferViewAccessors.push(accessorDef);
// Update buffer view.
bufferViewData.push(new Uint8Array(array.buffer, array.byteOffset, array.byteLength));
bufferView.byteLength += array.byteLength;
bufferView.extensions[EXT_MESHOPT_COMPRESSION].count += accessor.getCount();
}
}
/** @internal Writes compressed buffer views. */
_prewriteBuffers(context) {
const encoder = this._encoder;
for (const key in this._encoderBufferViews) {
const bufferView = this._encoderBufferViews[key];
const bufferViewData = this._encoderBufferViewData[key];
const buffer = this.document.getRoot().listBuffers()[bufferView.extensions[EXT_MESHOPT_COMPRESSION].buffer];
const otherBufferViews = context.otherBufferViews.get(buffer) || [];
const {
count,
byteStride,
mode
} = bufferView.extensions[EXT_MESHOPT_COMPRESSION];
const srcArray = core.BufferUtils.concat(bufferViewData);
const dstArray = encoder.encodeGltfBuffer(srcArray, count, byteStride, mode);
const compressedData = core.BufferUtils.pad(dstArray);
bufferView.extensions[EXT_MESHOPT_COMPRESSION].byteLength = dstArray.byteLength;
bufferViewData.length = 0;
bufferViewData.push(compressedData);
otherBufferViews.push(compressedData);
context.otherBufferViews.set(buffer, otherBufferViews);
}
}
/** @hidden Puts encoded data into glTF output. */
write(context) {
let fallbackBufferByteOffset = 0;
// Write final encoded buffer view properties.
for (const key in this._encoderBufferViews) {
const bufferView = this._encoderBufferViews[key];
const bufferViewData = this._encoderBufferViewData[key][0];
const bufferViewIndex = context.otherBufferViewsIndexMap.get(bufferViewData);
const bufferViewAccessors = this._encoderBufferViewAccessors[key];
for (const accessorDef of bufferViewAccessors) {
accessorDef.bufferView = bufferViewIndex;
}
const finalBufferViewDef = context.jsonDoc.json.bufferViews[bufferViewIndex];
const compressedByteOffset = finalBufferViewDef.byteOffset || 0;
Object.assign(finalBufferViewDef, bufferView);
finalBufferViewDef.byteOffset = fallbackBufferByteOffset;
const bufferViewExtensionDef = finalBufferViewDef.extensions[EXT_MESHOPT_COMPRESSION];
bufferViewExtensionDef.byteOffset = compressedByteOffset;
fallbackBufferByteOffset += core.BufferUtils.padNumber(bufferView.byteLength);
}
// Write final fallback buffer.
const fallbackBuffer = this._encoderFallbackBuffer;
const fallbackBufferIndex = context.bufferIndexMap.get(fallbackBuffer);
const fallbackBufferDef = context.jsonDoc.json.buffers[fallbackBufferIndex];
fallbackBufferDef.byteLength = fallbackBufferByteOffset;
fallbackBufferDef.extensions = {
[EXT_MESHOPT_COMPRESSION]: {
fallback: true
}
};
fallbackBuffer.dispose();
return this;
}
}
EXTMeshoptCompression.EXTENSION_NAME = EXT_MESHOPT_COMPRESSION;
EXTMeshoptCompression.EncoderMethod = EncoderMethod$1;
class AVIFImageUtils {
match(array) {
return array.length >= 12 && core.BufferUtils.decodeText(array.slice(4, 12)) === 'ftypavif';
}
/**
* Probes size of AVIF or HEIC image. Assumes a single static image, without
* orientation or other metadata that would affect dimensions.
*/
getSize(array) {
if (!this.match(array)) return null;
// References:
// - https://stackoverflow.com/questions/66222773/how-to-get-image-dimensions-from-an-avif-file
// - https://github.com/nodeca/probe-image-size/blob/master/lib/parse_sync/avif.js
const view = new DataView(array.buffer, array.byteOffset, array.byteLength);
let box = unbox(view, 0);
if (!box) return null;
let offset = box.end;
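// Walk top-level boxes, descending into 'meta' and 'iprp'/'ipco' containers
// until an 'ispe' (image spatial extents) box yields the dimensions.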
while (box = unbox(view, offset)) {
if (box.type === 'meta') {
offset = box.start + 4; // version + flags
} else if (box.type === 'iprp' || box.type === 'ipco') {
offset = box.start;
} else if (box.type === 'ispe') {
return [view.getUint32(box.start + 4), view.getUint32(box.start + 8)];
} else if (box.type === 'mdat') {
break; // mdat should be last, unlikely to find metadata past here.
} else {
offset = box.end;
}
}
return null;
}
getChannels(_buffer) {
return 4;
}
}
/**
* [`EXT_texture_avif`](https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Vendor/EXT_texture_avif/)
* enables AVIF images for any material texture.
*
* AVIF offers greatly reduced transmission size, but
* [requires browser support](https://caniuse.com/avif). Like PNG and JPEG, an AVIF image is
* *fully decompressed* when uploaded to the GPU, which increases upload time and GPU memory cost.
* For seamless uploads and minimal GPU memory cost, it is necessary to use a GPU texture format
* like Basis Universal, with the `KHR_texture_basisu` extension.
*
* Defining no {@link ExtensionProperty} types, this {@link Extension} is simply attached to the
* {@link Document}, and affects the entire Document by allowing use of the `image/avif` MIME type
* and passing AVIF image data to the {@link Texture.setImage} method. Without the Extension, the
* same MIME types and image data would yield an invalid glTF document, under the stricter core glTF
* specification.
*
* Properties:
* - N/A
*
* ### Example
*
* ```typescript
* import { EXTTextureAVIF } from '@gltf-transform/extensions';
*
* // Create an Extension attached to the Document.
* const avifExtension = document.createExtension(EXTTextureAVIF)
* .setRequired(true);
* document.createTexture('MyAVIFTexture')
* .setMimeType('image/avif')
* .setImage(fs.readFileSync('my-texture.avif'));
* ```
*
* AVIF conversion is not done automatically when adding the extension as shown above — you must
* convert the image data first, then pass the `.avif` payload to {@link Texture.setImage}.
*
* When the `EXT_texture_avif` extension is added to a file by glTF-Transform, the extension should
* always be required. This tool does not support writing assets that "fall back" to optional PNG or
* JPEG image data.
*/
class EXTTextureAVIF extends core.Extension {
constructor(...args) {
super(...args);
this.extensionName = EXT_TEXTURE_AVIF;
/** @hidden */
this.prereadTypes = [core.PropertyType.TEXTURE];
}
/** @hidden */
static register() {
core.ImageUtils.registerFormat('image/avif', new AVIFImageUtils());
}
/** @hidden */
preread(context) {
const textureDefs = context.jsonDoc.json.textures || [];
textureDefs.forEach(textureDef => {
if (textureDef.extensions && textureDef.extensions[EXT_TEXTURE_AVIF]) {
textureDef.source = textureDef.extensions[EXT_TEXTURE_AVIF].source;
}
});
return this;
}
/** @hidden */
read(_context) {
return this;
}
/** @hidden */
write(context) {
const jsonDoc = context.jsonDoc;
this.document.getRoot().listTextures().forEach(texture => {
if (texture.getMimeType() === 'image/avif') {
const imageIndex = context.imageIndexMap.get(texture);
const textureDefs = jsonDoc.json.textures || [];
textureDefs.forEach(textureDef => {
if (textureDef.source === imageIndex) {
textureDef.extensions = textureDef.extensions || {};
textureDef.extensions[EXT_TEXTURE_AVIF] = {
source: textureDef.source
};
delete textureDef.source;
}
});
}
});
return this;
}
}
EXTTextureAVIF.EXTENSION_NAME = EXT_TEXTURE_AVIF;
function unbox(data, offset) {
if (data.byteLength < 4 + offset) return null;
// size includes first 4 bytes (length)
const size = data.getUint32(offset);
if (data.byteLength < size + offset || size < 8) return null;
return {
type: core.BufferUtils.decodeText(new Uint8Array(data.buffer, data.byteOffset + offset + 4, 4)),
start: offset + 8,
end: offset + size
};
}
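// Assumed ISO-BMFF box layout: [size: uint32, including the 8 header bytes]
// [type: 4 ASCII bytes][payload]. `start` points at the payload, `end` at the
// first byte past the box.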
class WEBPImageUtils {
match(array) {
return array.length >= 12 && array[8] === 87 && array[9] === 69 && array[10] === 66 && array[11] === 80;
}
getSize(array) {
// Reference: http://tools.ietf.org/html/rfc6386
const RIFF = core.BufferUtils.decodeText(array.slice(0, 4));
const WEBP = core.BufferUtils.decodeText(array.slice(8, 12));
if (RIFF !== 'RIFF' || WEBP !== 'WEBP') return null;
const view = new DataView(array.buffer, array.byteOffset);
// Reference: https://wiki.tcl-lang.org/page/Reading+WEBP+image+dimensions
let offset = 12;
while (offset < view.byteLength) {
const chunkId = core.BufferUtils.decodeText(new Uint8Array([view.getUint8(offset), view.getUint8(offset + 1), view.getUint8(offset + 2), view.getUint8(offset + 3)]));
const chunkByteLength = view.getUint32(offset + 4, true);
if (chunkId === 'VP8 ') {
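// VP8 (lossy): the chunk payload opens with a 3-byte frame tag and the 3-byte
// start code 0x9d 0x01 0x2a; 14-bit width and height follow at chunk offsets
// 14 and 16.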
const width = view.getInt16(offset + 14, true) & 0x3fff;
const height = view.getInt16(offset + 16, true) & 0x3fff;
return [width, height];
} else if (chunkId === 'VP8L') {
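// VP8L (lossless): after the 1-byte signature (0x2F) opening the chunk
// payload, width-1 and height-1 are packed as consecutive 14-bit fields.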
const b0 = view.getUint8(offset + 9);
const b1 = view.getUint8(offset + 10);
const b2 = view.getUint8(offset + 11);
const b3 = view.getUint8(offset + 12);
const width = 1 + ((b1 & 0x3f) << 8 | b0);
const height = 1 + ((b3 & 0xf) << 10 | b2 << 2 | (b1 & 0xc0) >> 6);
return [width, height];
}
offset += 8 + chunkByteLength + chunkByteLength % 2;
}
return null;
}
getChannels(_buffer) {
return 4;
}
}
/**
* [`EXT_texture_webp`](https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Vendor/EXT_texture_webp/)
* enables WebP images for any material texture.
*
* WebP offers greatly reduced transmission size, but
* [requires browser support](https://caniuse.com/webp). Like PNG and JPEG, a WebP image is
* *fully decompressed* when uploaded to the GPU, which increases upload time and GPU memory cost.
* For seamless uploads and minimal GPU memory cost, it is necessary to use a GPU texture format
* like Basis Universal, with the `KHR_texture_basisu` extension.
*
* Defining no {@link ExtensionProperty} types, this {@link Extension} is simply attached to the
* {@link Document}, and affects the entire Document by allowing use of the `image/webp` MIME type
* and passing WebP image data to the {@link Texture.setImage} method. Without the Extension, the
* same MIME types and image data would yield an invalid glTF document, under the stricter core glTF
* specification.
*
* Properties:
* - N/A
*
* ### Example
*
* ```typescript
* import { EXTTextureWebP } from '@gltf-transform/extensions';
*
* // Create an Extension attached to the Document.
* const webpExtension = document.createExtension(EXTTextureWebP)
* .setRequired(true);
* document.createTexture('MyWebPTexture')
* .setMimeType('image/webp')
* .setImage(fs.readFileSync('my-texture.webp'));
* ```
*
* WebP conversion is not done automatically when adding the extension as shown above — you must
* convert the image data first, then pass the `.webp` payload to {@link Texture.setImage}.
*
* When the `EXT_texture_webp` extension is added to a file by glTF-Transform, the extension should
* always be required. This tool does not support writing assets that "fall back" to optional PNG or
* JPEG image data.
*/
class EXTTextureWebP extends core.Extension {
constructor(...args) {
super(...args);
this.extensionName = EXT_TEXTURE_WEBP;
/** @hidden */
this.prereadTypes = [core.PropertyType.TEXTURE];
}
/** @hidden */
static register() {
core.ImageUtils.registerFormat('image/webp', new WEBPImageUtils());
}
/** @hidden */
preread(context) {
const textureDefs = context.jsonDoc.json.textures || [];
textureDefs.forEach(textureDef => {
if (textureDef.extensions && textureDef.extensions[EXT_TEXTURE_WEBP]) {
textureDef.source = textureDef.extensions[EXT_TEXTURE_WEBP].source;
}
});
return this;
}
/** @hidden */
read(_context) {
return this;
}
/** @hidden */
write(context) {
const jsonDoc = context.jsonDoc;
this.document.getRoot().listTextures().forEach(texture => {
if (texture.getMimeType() === 'image/webp') {
const imageIndex = context.imageIndexMap.get(texture);
const textureDefs = jsonDoc.json.textures || [];
textureDefs.forEach(textureDef => {
if (textureDef.source === imageIndex) {
textureDef.extensions = textureDef.extensions || {};
textureDef.extensions[EXT_TEXTURE_WEBP] = {
source: textureDef.source
};
delete textureDef.source;
}
});
}
});
return this;
}
}
EXTTextureWebP.EXTENSION_NAME = EXT_TEXTURE_WEBP;
let decoderModule;
// Initialized when decoder module loads.
let COMPONENT_ARRAY;
let DATA_TYPE;
function decodeGeometry(decoder, data) {
const buffer = new decoderModule.DecoderBuffer();
try {
buffer.Init(data, data.length);
const geometryType = decoder.GetEncodedGeometryType(buffer);
if (geometryType !== decoderModule.TRIANGULAR_MESH) {
throw new Error(`[${KHR_DRACO_MESH_COMPRESSION}] Unknown geometry type.`);
}
const dracoMesh = new decoderModule.Mesh();
const status = decoder.DecodeBufferToMesh(buffer, dracoMesh);
if (!status.ok() || dracoMesh.ptr === 0) {
throw new Error(`[${KHR_DRACO_MESH_COMPRESSION}] Decoding failure.`);
}
return dracoMesh;
} finally {
decoderModule.destroy(buffer);
}
}
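// Note: the returned Mesh is allocated in WASM memory; per the Draco decoder
// API, the caller is expected to free it with decoderModule.destroy() after
// copying out indices and attributes.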
function decodeIndex(decoder, mesh) {
const numFaces = mesh.num_faces();
const numIndices = numFaces * 3;
let ptr;
let indices;
if (mesh.num_points() <= 65534) {
const byteLength = numIndices * Uint16Array.BYTES_PER_ELEMENT;
ptr = decoderModule._malloc(byteLength);
decoder.GetTrianglesUInt16Array(mesh, byteLength, ptr);
indices = new Uint16Array(decoderModule.HEAPU16.buffer, ptr, numIndices).slice();
} else {
const byteLength = numIndices * Uint32Array.BYTES_PER_ELEMENT;
ptr = decoderModule._malloc(byteLength);
decoder.GetTrianglesUInt32Array(mesh, byteLength, ptr);
indices = new Uint32Array(decoderModule.HEAPU32.buffer, ptr, numIndices).slice();
}
decoderModule._free(ptr);
return indices;
}
function decodeAttribute(decoder, mesh, attribute, accessorDef) {
const dataType = DATA_TYPE[accessorDef.componentType];
const ArrayCtor = COMPONENT_ARRAY[accessorDef.componentType];
const numComponents = attribute.num_components();
const numPoints = mesh.num_points();
const numValues = numPoints * numComponents;
const byteLength = numValues * ArrayCtor.BYTES_PER_ELEMENT;
const ptr = decoderModule._malloc(byteLength);
decoder.GetAttributeDataArrayForAllPoints(mesh, attribute, dataType, byteLength, ptr);
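// All Emscripten heap views (HEAPU8, HEAPF32, etc.) alias a single
// ArrayBuffer, so HEAPF32.buffer is valid for any component type; .slice()
// copies the data out of WASM memory before the pointer is freed.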
const array = new ArrayCtor(decoderModule.HEAPF32.buffer, ptr, numValues).slice();
decoderModule._free(ptr);
return array;
}
function initDecoderModule(_decoderModule) {
decoderModule = _decoderModule;
COMPONENT_ARRAY = {
[core.Accessor.ComponentType.FLOAT]: Float32Array,
[core.Accessor.ComponentType.UNSIGNED_INT]: Uint32Array,
[core.Accessor.ComponentType.UNSIGNED_SHORT]: Uint16Array,
[core.Accessor.ComponentType.UNSIGNED_BYTE]: Uint8Array,
[core.Accessor.ComponentType.SHORT]: Int16Array,
[core.Accessor.ComponentType.BYTE]: Int8Array
};
DATA_TYPE = {
[core.Accessor.ComponentType.FLOAT]: decoderModule.DT_FLOAT32,
[core.Accessor.ComponentType.UNSIGNED_INT]: decoderModule.DT_UINT32,
[core.Accessor.ComponentType.UNSIGNED_SHORT]: decoderModule.DT_UINT16,
[core.Accessor.ComponentType.UNSIGNED_BYTE]: decoderModule.DT_UINT8,
[core.Accessor.ComponentType.SHORT]: decoderModule.DT_INT16,
[core.Accessor.ComponentType.BYTE]: decoderModule.DT_INT8
};
}
let encoderModule;
var EncoderMethod;
(function (EncoderMethod) {
EncoderMethod[EncoderMethod["EDGEBREAKER"] = 1] = "EDGEBREAKER";
EncoderMethod[EncoderMethod["SEQUENTIAL"] = 0] = "SEQUENTIAL";
})(EncoderMethod || (EncoderMethod = {}));
var AttributeEnum;
(function (AttributeEnum) {
AttributeEnum["POSITION"] = "POSITION";
AttributeEnum["NORMAL"] = "NORMAL";
AttributeEnum["COLOR"] = "COLOR";
AttributeEnum["TEX_COORD"] = "TEX_COORD";
AttributeEnum["GENERIC"] = "GENERIC";
})(AttributeEnum || (AttributeEnum = {}));
const DEFAULT_QUANTIZATION_BITS = {
[AttributeEnum.POSITION]: 14,
[AttributeEnum.NORMAL]: 10,
[AttributeEnum.COLOR]: 8,
[AttributeEnum.TEX_COORD]: 12,
[AttributeEnum.GENERIC]: 12
};
const DEFAULT_ENCODER_OPTIONS = {
decodeSpeed: 5,
encodeSpeed: 5,
method: EncoderMethod.EDGEBREAKER,
quantizationBits: DEFAULT_QUANTIZATION_BITS,
quantizationVolume: 'mesh'
};
function initEncoderModule(_encoderModule) {
encoderModule = _encoderModule;
}
/**
* References:
* - https://github.com/mrdoob/three.js/blob/dev/examples/js/exporters/DRACOExporter.js
* - https://github.com/CesiumGS/gltf-pipeline/blob/master/lib/compressDracoMeshes.js
*/
function encodeGeometry(prim, _options = DEFAULT_ENCODER_OPTIONS) {
const options = {
...DEFAULT_ENCODER_OPTIONS,
..._options
};
options.quantizationBits = {
...DEFAULT_QUANTIZATION_BITS,
..._options.quantizationBits
};
const builder = new encoderModule.MeshBuilder();
const mesh = new encoderModule.Mesh();
const encoder = new encoderModule.ExpertEncoder(mesh);
const attributeIDs = {};
const dracoBuffer = new encoderModule.DracoInt8Array();
const hasMorphTargets = prim.listTargets().length > 0;
let hasSparseAttributes = false;
for (const semantic of prim.listSemantics()) {
const attribute = prim.getAttribute(semantic);
if (attribute.getSparse()) {
hasSparseAttributes = true;
continue;
}
const attributeEnum = getAttributeEnum(semantic);
const attributeID = addAttribute(builder, attribute.getComponentType(), mesh, encoderModule[attributeEnum], attribute.getCount(), attribute.getElementSize(), attribute.getArray());
if (attributeID === -1) throw new Error(`Error compressing "${semantic}" attribute.`);
attributeIDs[semantic] = attributeID;
if (options.quantizationVolume === 'mesh' || semantic !== 'POSITION') {
encoder.SetAttributeQuantization(attributeID, options.quantizationBits[attributeEnum]);
} else if (typeof options.quantizationVolume === 'object') {
const {
quantizationVolume
} = options;
const range = Math.max(quantizationVolume.max[0] - quantizationVolume.min[0], quantizationVolume.max[1] - quantizationVolume.min[1], quantizationVolume.max[2] - quantizationVolume.min[2]);
encoder.SetAttributeExplicitQuantization(attributeID, options.quantizationBits[attributeEnum], attribute.getElementSize(), quantizationVolume.min, range);
} else {
throw new Error('Invalid quantization volume state.');
}
}
const indi