@hoff97/tensor-js

PyTorch-like deep learning inference library

// TypeScript async/await helper emitted by the compiler for the async forward() below.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
import { glContext } from '../../../tensor/gpu/gl';
import { OnnxNode } from '../../node';
export class InstanceNormalizationNode extends OnnxNode {
    constructor(attributes, inputs, outputs, constants, onnxVersion, mode) {
        super(attributes, inputs, outputs, constants, onnxVersion, mode);
        this.epsilon = this.getAttributeFloat('epsilon') || 1e-5;
        //TODO: Handle onnx versions < 6 here
    }
    forward(inputs) {
        return __awaiter(this, void 0, void 0, function* () {
            // Inputs: x of shape [N, C, d1, ..., dk], per-channel scale and bias B of shape [C].
            const x = inputs[0];
            let scale = inputs[1];
            let B = inputs[2];
            const dataRank = x.getShape().length - 2;
            const C = scale.getShape()[0];
            // Reshape scale and B to [1, C, 1, ..., 1] so they broadcast over the batch and spatial axes.
            const newShape = [1, C, ...new Array(dataRank).fill(1)];
            scale = scale.reshape(newShape, false);
            B = B.reshape(newShape, false);
            // Reduce over the spatial axes (2 .. rank-1) to get per-instance, per-channel statistics.
            const reduceAxes = new Array(dataRank);
            for (let i = 0; i < dataRank; i++) {
                reduceAxes[i] = i + 2;
            }
            const mean = x.reduceMean(reduceAxes, true);
            glContext.flush();
            const diff = x.subtract(mean);
            glContext.flush();
            const variance = diff.reduceMeanSquare(reduceAxes, true);
            glContext.flush();
            // result = scale * (x - mean) / sqrt(variance + epsilon) + B
            const result = x.normalize(mean, variance, this.epsilon, scale, B);
            // Free intermediate GPU tensors.
            mean.delete();
            diff.delete();
            variance.delete();
            return [result];
        });
    }
    getType() {
        return 'InstanceNormalization';
    }
    delete() { }
}
//# sourceMappingURL=instanceNormalization.js.map
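
For reference, the forward pass above computes standard instance normalization: for each sample and channel, y = scale[c] * (x - mean) / sqrt(variance + epsilon) + B[c], where mean and variance are taken over the spatial dimensions only. The sketch below is a plain-JavaScript illustration of that formula on a nested [N, C, L] array; it is not part of @hoff97/tensor-js, and the function name referenceInstanceNorm is hypothetical.

// Illustrative sketch only, not part of the library: instance normalization
// over a nested [N, C, L] array, using the same per-channel statistics as above.
function referenceInstanceNorm(x, scale, B, epsilon = 1e-5) {
  return x.map((sample) =>
    sample.map((channel, c) => {
      const mean = channel.reduce((s, v) => s + v, 0) / channel.length;
      const variance =
        channel.reduce((s, v) => s + (v - mean) * (v - mean), 0) / channel.length;
      const denom = Math.sqrt(variance + epsilon);
      return channel.map((v) => (scale[c] * (v - mean)) / denom + B[c]);
    })
  );
}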