@hoff97/tensor-js

PyTorch-like deep learning inference library

// Standard async/await helper emitted by the TypeScript compiler.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
import { CPUTensor } from '../../tensor/cpu/tensor';
import { OnnxNode } from '../node';
// ONNX Expand operator: broadcasts the input tensor to the shape given by the
// second input, following numpy-style broadcasting rules.
export class ExpandNode extends OnnxNode {
    constructor(attributes, inputs, outputs, constants, onnxVersion, mode) {
        super(attributes, inputs, outputs, constants, onnxVersion, mode);
    }
    forward(inputs) {
        return __awaiter(this, void 0, void 0, function* () {
            if (this.onnxVersion < 13) {
                const tensor = inputs[0];
                const _shape = inputs[1];
                // The target shape must live on the CPU so its values can be read directly.
                if (!(_shape instanceof CPUTensor)) {
                    throw new Error('Expand needs cpu tensor as shape tensor');
                }
                // Copy the entries of the shape tensor into a plain array.
                const shape = new Array(_shape.size);
                for (let i = 0; i < _shape.size; i++) {
                    shape[i] = _shape.get(i);
                }
                return [tensor.expand(shape)];
            }
            throw new Error(`Expand not yet implemented for onnx version ${this.onnxVersion}`);
        });
    }
    getType() {
        return 'Expand';
    }
    delete() { }
}
//# sourceMappingURL=expand.js.map
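For context, ONNX Expand broadcasts the input to the requested shape using numpy-style rules: dimensions are aligned from the right, and a dimension of 1 stretches to match the other side. The sketch below is plain TypeScript, independent of this library's API; the helper name broadcastExpandShape is made up for illustration and only shows how the resulting output shape can be derived.

// Illustrative only, not part of @hoff97/tensor-js: compute the output shape
// of ONNX Expand by broadcasting the input shape against the requested shape.
function broadcastExpandShape(inputShape: number[], targetShape: number[]): number[] {
  const rank = Math.max(inputShape.length, targetShape.length);
  const out: number[] = new Array(rank);
  for (let i = 0; i < rank; i++) {
    // Align dimensions from the right; missing dimensions count as 1.
    const a = inputShape[inputShape.length - 1 - i] ?? 1;
    const b = targetShape[targetShape.length - 1 - i] ?? 1;
    if (a !== b && a !== 1 && b !== 1) {
      throw new Error(`Shapes not broadcastable: [${inputShape}] vs [${targetShape}]`);
    }
    out[rank - 1 - i] = Math.max(a, b);
  }
  return out;
}

// Example: expanding a [3, 1] tensor with target shape [2, 1, 4] yields [2, 3, 4].
console.log(broadcastExpandShape([3, 1], [2, 1, 4])); // [2, 3, 4]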