@hoff97/tensor-js
PyTorch-like deep learning inference library
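// TypeScript's downleveled async/await helper (__awaiter), as emitted by the compiler.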
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
import { Variable } from '../autograd/variable';
import { CPUTensor } from '../tensor/cpu/tensor';
import { GPUTensor } from '../tensor/gpu/tensor';
import { SparseTensor } from '../tensor/sparse/tensor';
import { WASMTensor } from '../tensor/wasm/tensor';
/**
* Convert a tensor to the specified backend
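 *
 * 'CPU' and 'WASM' are matched explicitly below; any other backend value
 * falls through to the GPU conversion.
 *
 * Usage sketch (the 'GPU' backend string and the 'float32' dtype are
 * assumptions inferred from the branches in this file, not guaranteed by it):
 * @example
 * const cpu = new CPUTensor([2, 2], new Float32Array([1, 2, 3, 4]), 'float32');
 * const gpu = await toBackend(cpu, 'GPU');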
*/
export function toBackend(tensor, backend) {
return __awaiter(this, void 0, void 0, function* () {
if (backend === 'CPU') {
return toCPU(tensor);
}
else if (backend === 'WASM') {
return toWASM(tensor);
}
else {
return toGPU(tensor);
}
});
}
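/**
 * Convert a tensor to the CPU backend. Variables and sparse tensors are
 * converted recursively (value/gradient and values/indices respectively);
 * a tensor that is already a CPUTensor is returned unchanged.
 */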
export function toCPU(tensor) {
return __awaiter(this, void 0, void 0, function* () {
if (tensor instanceof Variable) {
return new Variable(yield toCPU(tensor.value), {
grad: tensor.grad !== undefined ? yield toCPU(tensor.grad) : undefined,
});
}
else if (tensor instanceof SparseTensor) {
return new SparseTensor(yield toCPU(tensor.values), yield toCPU(tensor.indices), tensor.shape, tensor.denseDims);
}
if (tensor instanceof CPUTensor) {
return tensor;
}
const values = yield tensor.getValues();
return new CPUTensor(tensor.getShape(), values, tensor.dtype);
});
}
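/**
 * Convert a tensor to the WASM backend. Variables and sparse tensors are
 * converted recursively; a tensor that is already a WASMTensor is returned unchanged.
 * @throws if the tensor has dtype 'float16', which the WASM backend cannot represent.
 */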
export function toWASM(tensor) {
return __awaiter(this, void 0, void 0, function* () {
if (tensor.dtype === 'float16') {
throw new Error('Cannot represent float16 tensor on WASM backend');
}
if (tensor instanceof Variable) {
return new Variable(yield toWASM(tensor.value), {
grad: tensor.grad !== undefined ? yield toWASM(tensor.grad) : undefined,
});
}
else if (tensor instanceof SparseTensor) {
return new SparseTensor(yield toWASM(tensor.values), yield toWASM(tensor.indices), tensor.shape, tensor.denseDims);
}
if (tensor instanceof WASMTensor) {
return tensor;
}
const values = yield tensor.getValues();
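// Int32 CPU tensors are returned unchanged here (presumably because the WASM
// backend does not handle int32 data), so the result may still be a CPUTensor.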
if (tensor instanceof CPUTensor && values instanceof Int32Array) {
return tensor;
}
return new WASMTensor(Array.from(values), new Uint32Array(tensor.getShape()), tensor.dtype);
});
}
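/**
 * Convert a tensor to the GPU (WebGL) backend. Variables and sparse tensors are
 * converted recursively; a tensor that is already a GPUTensor is returned unchanged.
 * @throws if the tensor has dtype 'float64', which the WebGL backend cannot represent.
 */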
export function toGPU(tensor) {
return __awaiter(this, void 0, void 0, function* () {
if (tensor.dtype === 'float64') {
throw new Error('Cannot represent float64 tensor on WebGL backend');
}
if (tensor instanceof Variable) {
return new Variable(yield toGPU(tensor.value), {
grad: tensor.grad !== undefined ? yield toGPU(tensor.grad) : undefined,
});
}
else if (tensor instanceof SparseTensor) {
return new SparseTensor(yield toGPU(tensor.values), yield toGPU(tensor.indices), tensor.shape, tensor.denseDims);
}
if (tensor instanceof GPUTensor) {
return tensor;
}
const values = yield tensor.getValues();
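// As in toWASM, int32 CPU tensors are returned unchanged (presumably because the
// WebGL backend does not handle int32 data), so the result may still be a CPUTensor.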
if (tensor instanceof CPUTensor && values instanceof Int32Array) {
return tensor;
}
return new GPUTensor(Array.from(values), tensor.getShape(), tensor.dtype);
});
}
/**
 * Determines whether two tensors are of the same type, i.e. whether they have
 * the same dtype and live on the same backend.
 */
export function sameType(a, b) {
if (a.dtype !== b.dtype) {
return false;
}
if (a instanceof Variable && b instanceof Variable) {
return sameType(a.value, b.value);
}
if (a instanceof CPUTensor && b instanceof CPUTensor) {
return true;
}
if (a instanceof WASMTensor && b instanceof WASMTensor) {
return true;
}
if (a instanceof GPUTensor && b instanceof GPUTensor) {
return true;
}
return false;
}
//# sourceMappingURL=convert.js.map