@hoff97/tensor-js
PyTorch-like deep learning inference library
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
import { OnnxNode } from '../../node';
export class ConcatNode extends OnnxNode {
constructor(attributes, inputs, outputs, constants, onnxVersion, mode) {
super(attributes, inputs, outputs, constants, onnxVersion, mode);
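        // For opset < 13 the concat axis is read eagerly from the node attributes;
        // newer opsets are not handled here and fall through to the error in forward().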
if (onnxVersion < 13) {
//@ts-ignore
this.axis = this.getAttributeInt('axis');
}
}
forward(inputs) {
return __awaiter(this, void 0, void 0, function* () {
            // Note: concatenating more than 2 tensors is still done pairwise and can be
            // slow; the console.warn that used to flag this was removed because the
            // logging cost more than the operation itself.
if (this.onnxVersion < 13 && this.axis !== undefined) {
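                // Opset < 13 path: use the static axis read in the constructor and
                // fold the inputs together pairwise, two tensors per concat call.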
let result = inputs[0];
for (let i = 1; i < inputs.length; i++) {
const newRes = result.concat(inputs[i], this.axis);
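                    // Free the intermediate result from the previous iteration.
                    // When i === 1, result is still inputs[0], which is owned by the
                    // caller and must not be deleted here.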
if (i > 1) {
result.delete();
}
result = newRes;
}
return [result];
}
throw new Error(`Concat not implemented for onnx version ${this.onnxVersion}`);
});
}
getType() {
return 'Concat';
}
    delete() { } // ConcatNode holds no per-node resources, so there is nothing to release.
}
//# sourceMappingURL=concat.js.map
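
// Illustrative sketch (not part of the published module): the same pairwise
// reduction that ConcatNode.forward performs, shown with plain nested arrays
// instead of library tensors. `concatAxis0` and `concatAll` are hypothetical
// helpers used only to make the access pattern concrete.
function concatAxis0(a, b) {
    // Concatenate two "tensors" (arrays of rows) along axis 0.
    return [...a, ...b];
}
function concatAll(inputs) {
    let result = inputs[0];
    for (let i = 1; i < inputs.length; i++) {
        // Only two operands are ever concatenated per step, which is why the
        // node above gets slower as the number of input tensors grows.
        result = concatAxis0(result, inputs[i]);
    }
    return result;
}
// Example: concatAll([[[1, 2]], [[3, 4]], [[5, 6]]]) -> [[1, 2], [3, 4], [5, 6]]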