@hoff97/tensor-js
Version:
PyTorch-like deep learning inference library
487 lines • 18.1 kB
JavaScript
// TypeScript-style async helper: drives a generator function as if it were an
// async function, resolving each yielded value before resuming the generator.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    if (!P) { P = Promise; }
    // Lift a plain value into the promise type; promises pass through unchanged.
    function lift(value) {
        return value instanceof P ? value : new P(function (resolve) { resolve(value); });
    }
    return new P(function (resolve, reject) {
        var iterator = generator.apply(thisArg, _arguments || []);
        // Inspect one iterator result: finish on done, otherwise await the yielded value.
        function advance(result) {
            if (result.done) {
                resolve(result.value);
            }
            else {
                lift(result.value).then(onNext, onThrow);
            }
        }
        // Resume the generator with the awaited value; reject on synchronous throw.
        function onNext(value) {
            try { advance(iterator.next(value)); } catch (e) { reject(e); }
        }
        // Propagate a rejection into the generator so user try/catch can handle it.
        function onThrow(reason) {
            try { advance(iterator["throw"](reason)); } catch (e) { reject(e); }
        }
        onNext(undefined);
    });
};
import { max } from '../../ops/sparse/aggregate/max/max';
import { min } from '../../ops/sparse/aggregate/min/min';
import { product } from '../../ops/sparse/aggregate/product/product';
import { reduceLogSum } from '../../ops/sparse/aggregate/reduceLogSum/reduceLogSum';
import { reduceLogSumExp } from '../../ops/sparse/aggregate/reduceLogSumExp/reduceLogSumExp';
import { reduceMean } from '../../ops/sparse/aggregate/reduceMean/reduceMean';
import { reduceMeanSquare } from '../../ops/sparse/aggregate/reduceMeanSquare/reduceMeanSquare';
import { sum } from '../../ops/sparse/aggregate/sum/sum';
import { sumSquare } from '../../ops/sparse/aggregate/sumSquare/sumSquare';
import { add } from '../../ops/sparse/binary/add/add';
import { divide } from '../../ops/sparse/binary/divide/divide';
import { multiply } from '../../ops/sparse/binary/multiply/multiply';
import { subtract } from '../../ops/sparse/binary/subtract/subtract';
import { concat } from '../../ops/sparse/concat/concat';
import { matMul } from '../../ops/sparse/matMul/matMul';
import { repeat } from '../../ops/sparse/repeat/repeat';
import { reshape } from '../../ops/sparse/reshape/reshape';
import Tensor, { tensorValuesConstructor, } from '../../types';
import { computeStrides, getSize, indexToPos, posToIndex, } from '../../util/shape';
import { CPUTensor } from '../cpu/tensor';
export class SparseTensor extends Tensor {
  /**
   * Creates a new sparse tensor in coordinate format. The tensor has
   * a number of sparse dimensions and optionally a number of dense
   * dimensions. The shape of a sparse tensor can thus be decomposed
   * into [...S, ...D], where S is the shape of the sparse dimensions
   * and D the shape of the dense dimensions. By default the number of
   * dense dimensions is zero.
   *
   * The values tensor holds all non-zero values and has shape [NNZ, ...D]
   * where NNZ is the number of non-zero entries. The indices tensor
   * holds the location of all non-zero entries of the tensor and
   * has shape [NNZ, |S|] (where |S| is the number of sparse dimensions).
   *
   * Note that all indexes that are not specified are implicitly zero.
   * This does however **not** mean that they become non-zero on
   * certain element wise operations. Instead element wise operations
   * maintain the sparsity pattern. Otherwise, many operations would
   * create effectively dense tensors (eg. exp()), or would simply not be
   * well defined (eg. log()).
   *
   * @param values Values of the nonzero entries, shape [NNZ, ...D]
   * @param indices Coordinates of the nonzero entries, shape [NNZ, |S|].
   *                Each row contains the coordinate of the respective
   *                nonzero entry.
   * @param shape Shape of the tensor
   * @param denseDims Number of dense dimensions. Defaults to 0
   *
   * @example
   *
   * If you want to create a sparse tensor, equivalent to the following CPU
   * tensor:
   * ```typescript
   * const a = new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]);
   * ```
   * you collect the indices, where the value is nonzero:
   * ```typescript
   * const indices = [
   *   0,0, // Corresponds to value 1
   *   1,1, // Corresponds to value 2
   *   2,1, // Corresponds to value 3
   *   2,2  // Corresponds to value 4
   * ];
   * const indiceTensor = new CPUTensor([4, 2], indices, 'uint32');
   * ```
   * and the corresponding values:
   * ```typescript
   * const values = [1,2,3,4];
   * const valueTensor = new CPUTensor([4],values);
   *
   * const sparseTensor = new SparseTensor(valueTensor, indiceTensor, [3,3]);
   * ```
   */
  constructor(values, indices, shape, denseDims = 0) {
    super(values.dtype);
    this.values = values;
    this.indices = indices;
    this.shape = shape;
    this.denseDims = denseDims;
    // Bookkeeping for the equivalent dense view of this tensor
    this.size = getSize(shape);
    this.strides = computeStrides(shape);
    // The number of nonzero entries is the leading dimension of the indices tensor
    this.nnz = this.indices.getShape()[0];
    this.sparseDims = this.shape.length - this.denseDims;
  }
  /**
   * Internal helper: builds a new sparse tensor that shares this tensor's
   * sparsity pattern (copied indices, identical shape and dense dimensions)
   * but holds the given values. Used by all sparsity-preserving element wise
   * operations.
   *
   * @param values New values tensor, same shape as this tensor's values
   */
  withSameSparsity(values) {
    return new SparseTensor(values, this.indices.copy(), this.shape, this.denseDims);
  }
  /**
   * Creates a sparse tensor with zero dense dimensions from a dense CPU tensor.
   *
   * @example
   * ```typescript
   * const denseTensor = new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]);
   *
   * const sparseTensor = SparseTensor.fromDense(denseTensor);
   * console.log(sparseTensor.nnz); // Will log '4'
   * console.log(sparseTensor.sparseDims); // Will log '2'
   * ```
   */
  static fromDense(tensor) {
    const ix = [];
    const vals = [];
    for (let i = 0; i < tensor.size; i++) {
      // Read each entry once; only nonzero entries are recorded
      const value = tensor.get(i);
      if (value !== 0) {
        // Convert the flat position into a multi-dimensional coordinate
        const index = posToIndex(i, tensor.strides);
        for (let j = 0; j < tensor.shape.length; j++) {
          ix.push(index[j]);
        }
        vals.push(value);
      }
    }
    // One value per nonzero entry, so the count falls out of the collected values
    const nnz = vals.length;
    const indices = new CPUTensor([nnz, tensor.shape.length], ix, 'uint32');
    const values = new CPUTensor([nnz], vals, tensor.dtype);
    return new SparseTensor(values, indices, tensor.shape);
  }
  /**
   * Returns the values of the equivalent dense tensor as a flat typed array.
   * Positions not covered by the sparsity pattern are zero.
   */
  getValues() {
    return __awaiter(this, void 0, void 0, function* () {
      const vals = yield this.values.getValues();
      const indices = yield this.indices.getValues();
      // Number of entries in one dense block (1 if there are no dense dims)
      const denseSize = getSize(this.getDenseShape(), 1);
      const sparseStrides = computeStrides(this.getSparseShape());
      // Dense output buffer; typed arrays are zero initialized
      const result = new tensorValuesConstructor[this.values.dtype](this.size);
      for (let i = 0; i < this.nnz; i++) {
        // Read the sparse coordinate of the i-th nonzero entry
        const sparseIx = [];
        for (let j = 0; j < this.sparseDims; j++) {
          sparseIx.push(indices[i * this.sparseDims + j]);
        }
        const sparsePos = indexToPos(sparseIx, sparseStrides);
        // Copy the dense block belonging to this entry into the output
        for (let j = 0; j < denseSize; j++) {
          result[sparsePos * denseSize + j] = vals[i * denseSize + j];
        }
      }
      return result;
    });
  }
  /**
   * Sparse part of the shape of the tensor, ie. the S first values of
   * the shape, where S is the number of sparse dimensions.
   */
  getSparseShape() {
    return this.shape.slice(0, this.shape.length - this.denseDims);
  }
  /**
   * Dense part of the shape of the tensor, ie. the D last values of
   * the shape, where D is the number of dense dimensions.
   */
  getDenseShape() {
    return this.shape.slice(this.shape.length - this.denseDims);
  }
  getShape() {
    return this.shape;
  }
  /**
   * Creates a new sparse tensor with the same sparsity shape
   * and the given value everywhere.
   *
   * @param value Constant value to set at every position
   */
  constantLike(value) {
    return this.withSameSparsity(this.values.constantLike(value));
  }
  /**
   * Not implemented yet
   */
  singleConstant(value) {
    throw new Error('Method not implemented.');
  }
  cast(dtype) {
    return this.withSameSparsity(this.values.cast(dtype));
  }
  delete() {
    // Release both backing tensors
    this.values.delete();
    this.indices.delete();
  }
  reshape_impl(shape, copy) {
    return reshape(this, shape, copy);
  }
  exp() {
    return this.withSameSparsity(this.values.exp());
  }
  log() {
    return this.withSameSparsity(this.values.log());
  }
  sqrt() {
    return this.withSameSparsity(this.values.sqrt());
  }
  abs() {
    return this.withSameSparsity(this.values.abs());
  }
  sin() {
    return this.withSameSparsity(this.values.sin());
  }
  cos() {
    return this.withSameSparsity(this.values.cos());
  }
  tan() {
    return this.withSameSparsity(this.values.tan());
  }
  asin() {
    return this.withSameSparsity(this.values.asin());
  }
  acos() {
    return this.withSameSparsity(this.values.acos());
  }
  atan() {
    return this.withSameSparsity(this.values.atan());
  }
  sinh() {
    return this.withSameSparsity(this.values.sinh());
  }
  cosh() {
    return this.withSameSparsity(this.values.cosh());
  }
  tanh() {
    return this.withSameSparsity(this.values.tanh());
  }
  asinh() {
    return this.withSameSparsity(this.values.asinh());
  }
  acosh() {
    return this.withSameSparsity(this.values.acosh());
  }
  atanh() {
    return this.withSameSparsity(this.values.atanh());
  }
  negate() {
    return this.withSameSparsity(this.values.negate());
  }
  powerScalar(power, factor) {
    return this.withSameSparsity(this.values.powerScalar(power, factor));
  }
  sigmoid() {
    return this.withSameSparsity(this.values.sigmoid());
  }
  hardSigmoid(alpha, beta) {
    return this.withSameSparsity(this.values.hardSigmoid(alpha, beta));
  }
  sign() {
    return this.withSameSparsity(this.values.sign());
  }
  addMultiplyScalar(factor, add) {
    return this.withSameSparsity(this.values.addMultiplyScalar(factor, add));
  }
  /**
   * Calculates the matrix product. This tensor should have shape [M,N]
   *
   * Two cases are supported for sparse tensors:
   * - If this tensor has one sparse dimension, the resulting tensor is
   *   a sparse tensor with the same number of non-zero entries
   * - If this tensor has two sparse dimensions, the resulting tensor
   *   is dense.
   * Right now this only supports sparse-dense matrix multiplication.
   * Supported on
   * - All backends if the sparse tensor has 1 sparse dimension
   * - Only on CPU/WASM if the sparse tensor has two sparse dimensions
   *
   * @param tensor Dense matrix to multiply with. Should have shape [N,O]
   *
   * @result Tensor with shape [M,O]
   */
  matMul(tensor) {
    return matMul(this, tensor);
  }
  /**
   * Concatenate the two tensors along the given axis
   *
   * Note that at the moment, only concatenation along
   * sparse dimensions is supported!
   */
  concat(tensor, axis) {
    if (!(tensor instanceof SparseTensor)) {
      throw new Error('Can only concatenate sparse tensors!');
    }
    return concat(this, tensor, axis);
  }
  clip(min, max) {
    return this.withSameSparsity(this.values.clip(min, max));
  }
  /**
   * Not implemented yet
   */
  clipBackward(grad, min, max) {
    throw new Error('Method not implemented.');
  }
  repeat(repeats) {
    return repeat(this, repeats);
  }
  /**
   * Not implemented yet
   */
  expand(shape) {
    throw new Error('Method not implemented.');
  }
  copy() {
    return this.withSameSparsity(this.values.copy());
  }
  /**
   * Not implemented yet
   */
  gather(axis, indices) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  setValues(values, starts) {
    throw new Error('Method not implemented.');
  }
  floor() {
    return this.withSameSparsity(this.values.floor());
  }
  ceil() {
    return this.withSameSparsity(this.values.ceil());
  }
  round() {
    return this.withSameSparsity(this.values.round());
  }
  /**
   * Not implemented yet
   */
  upsample(scales) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  normalize(mean, variance, epsilon, scale, bias) {
    throw new Error('Method not implemented.');
  }
  /**
   * Adds a second tensor, which can either be a sparse or a dense tensor:
   * - If the second tensor is a dense tensor, it is assumed that it has a rank at most
   *   equal to the dense dimensions of the first tensor.
   *   If this is not the case, entries in the second tensors that are zero in the first
   *   tensor are simply ignored!
   *   This also means that broadcasting in the first tensor is only supported
   *   on the dense dimensions!
   * - If the second tensor is a sparse tensor, it is assumed that the first and
   *   second tensor have exactly the same sparsity pattern!
   *
   * This is not supported on the WebGL backend yet.
   */
  add(tensor, alpha, beta) {
    return super.add(tensor, alpha, beta);
  }
  /**
   * Subtracts a second tensor, which can either be a sparse or a dense tensor.
   * The same restrictions as for {@link SparseTensor.add} apply!
   */
  subtract(tensor, alpha, beta) {
    return super.subtract(tensor, alpha, beta);
  }
  /**
   * Multiplies a second tensor element wise, which can either be a sparse or a dense tensor.
   * The same restrictions as for {@link SparseTensor.add} apply!
   */
  multiply(tensor, alpha) {
    return super.multiply(tensor, alpha);
  }
  /**
   * Divides a second tensor element wise, which can either be a sparse or a dense tensor.
   * The same restrictions as for {@link SparseTensor.add} apply!
   */
  divide(tensor, alpha) {
    return super.divide(tensor, alpha);
  }
  add_impl(th, tensor, resultShape, alpha, beta) {
    return add(th, tensor, resultShape, alpha, beta);
  }
  subtract_impl(th, tensor, resultShape, alpha, beta) {
    return subtract(th, tensor, resultShape, alpha, beta);
  }
  multiply_impl(th, tensor, resultShape, alpha) {
    return multiply(th, tensor, resultShape, alpha);
  }
  divide_impl(th, tensor, resultShape, alpha) {
    return divide(th, tensor, resultShape, alpha);
  }
  /**
   * Not implemented yet
   */
  power_impl(th, tensor, resultShape) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  gemm_impl(b, aTranspose, bTranspose, alpha, beta, C) {
    throw new Error('Method not implemented.');
  }
  /**
   * Sums over sparse and/or dense dimensions according to the specified
   * axes.
   *
   * - If summing only over dense dimensions, all backends are supported.
   * - If summing over sparse dimensions, only CPU/WebGL are supported
   */
  sum(axes, keepDims) {
    return super.sum(axes, keepDims);
  }
  sum_impl(axes, keepDims) {
    return sum(this, axes, keepDims);
  }
  sumSquare_impl(axes, keepDims) {
    return sumSquare(this, axes, keepDims);
  }
  product_impl(axes, keepDims) {
    return product(this, axes, keepDims);
  }
  max_impl(axes, keepDims) {
    return max(this, axes, keepDims);
  }
  min_impl(axes, keepDims) {
    return min(this, axes, keepDims);
  }
  reduceMean_impl(axes, keepDims) {
    return reduceMean(this, axes, keepDims);
  }
  reduceMeanSquare_impl(axes, keepDims) {
    return reduceMeanSquare(this, axes, keepDims);
  }
  reduceLogSum_impl(axes, keepDims) {
    return reduceLogSum(this, axes, keepDims);
  }
  reduceLogSumExp_impl(axes, keepDims) {
    return reduceLogSumExp(this, axes, keepDims);
  }
  /**
   * Not implemented yet
   */
  conv_impl(kernel, dilations, group, pads, strides, activation, bias) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  convTranspose_impl(kernel, dilations, group, pads, strides) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  pad_impl(pads, mode, value) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  averagePool_impl(kernelShape, pads, strides, includePad) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  transpose_impl(permutation) {
    throw new Error('Method not implemented.');
  }
  /**
   * Not implemented yet
   */
  slice_impl(starts, ends, axes, steps) {
    throw new Error('Method not implemented.');
  }
}
//# sourceMappingURL=tensor.js.map