@hoff97/tensor-js
PyTorch-like deep learning inference library
import Tensor, { Activation, DType, PadMode, TensorValues } from '../../types';
import { CPUTensor } from '../cpu/tensor';
export declare class SparseTensor<DTpe extends DType = 'float32'> extends Tensor<DTpe> {
/**
* Values of the nonzero entries
*/
values: Tensor<DTpe>;
/**
* Coordinates of the nonzero entries. Has shape [nnz, S],
* where nnz is the number of nonzero entries and S the number of
* sparse dimensions.
*
* Each row contains the coordinate of the respective nonzero entry.
*/
indices: Tensor<'uint32'>;
/**
* Shape of the tensor
*/
shape: readonly number[];
/**
* Number of dense dimensions. Defaults to 0
*/
denseDims: number;
/**
* Creates a sparse tensor with zero dense dimensions from a dense CPU tensor.
*
* @example
* ```typescript
* const denseTensor = new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]);
*
* const sparseTensor = SparseTensor.fromDense(denseTensor);
* console.log(sparseTensor.nnz); // Will log '4'
* console.log(sparseTensor.sparseDims); // Will log '2'
* ```
*/
static fromDense<DTpe extends DType>(tensor: CPUTensor<DTpe>): SparseTensor<DTpe>;
/**
* Total number of entries (including zero entries) in the tensor
*/
size: number;
/**
* Dense strides of the tensor
*/
strides: number[];
/**
* Number of nonzero entries in the tensor
*/
nnz: number;
/**
* Number of sparse dimensions
*/
sparseDims: number;
/**
* Creates a new sparse tensor in coordinate format. The tensor has
* a number of sparse dimensions and optionally a number of dense
* dimensions. The shape of a sparse tensor can thus be decomposed
* into [...S, ...D], where S is the shape of the sparse dimensions
* and D the shape of the dense dimensions. By default the number of
* dense dimensions is zero.
*
* The values tensor holds all non-zero values and has shape [NNZ, ...D]
* where NNZ is the number of non-zero entries. The indices tensor
* holds the location of all non-zero entries of the tensor and
* has shape [NNZ, |S|] (where |S| is the number of sparse dimensions).
*
* Note that all entries whose indices are not specified are implicitly
* zero. This does however **not** mean that they can become nonzero
* under certain element-wise operations. Instead, element-wise operations
* maintain the sparsity pattern. Otherwise, many operations would
* create effectively dense tensors (e.g. exp()), or would simply not be
* well defined (e.g. log()).
*
* @example
*
* If you want to create a sparse tensor equivalent to the following CPU
* tensor:
* ```typescript
* const a = new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]);
* ```
* you collect the indices where the values are nonzero:
* ```typescript
* const indices = [
* 0,0, // Corresponds to value 1
* 1,1, // Corresponds to value 2
* 2,1, // Corresponds to value 3
* 2,2 // Corresponds to value 4
* ];
* const indiceTensor = new CPUTensor([4, 2], indices, 'uint32');
* ```
* and the corresponding values:
* ```typescript
* const values = [1,2,3,4];
* const valueTensor = new CPUTensor([4],values);
*
* const sparseTensor = new SparseTensor(valueTensor, indiceTensor, [3,3]);
* ```
*/
constructor(
/**
* Values of the nonzero entries
*/
values: Tensor<DTpe>,
/**
* Coordinates of the nonzero entries. Has shape [nnz, S],
* where nnz is the number of nonzero entries and S the number of
* sparse dimensions.
*
* Each row contains the coordinate of the respective nonzero entry.
*/
indices: Tensor<'uint32'>,
/**
* Shape of the tensor
*/
shape: readonly number[],
/**
* Number of dense dimensions. Defaults to 0
*/
denseDims?: number);
getValues(): Promise<TensorValues[DTpe]>;
/**
* Sparse part of the shape of the tensor, i.e. the first S values of
* the shape, where S is the number of sparse dimensions.
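*
* @example
*
* A minimal sketch with two sparse and one dense dimension
* (the concrete values are illustrative only):
* ```typescript
* // Values have shape [nnz, ...D] = [2, 2]
* const values = new CPUTensor([2,2],[1,2,3,4]);
* // Indices have shape [nnz, S] = [2, 2]
* const indices = new CPUTensor([2,2],[0,0,2,1], 'uint32');
* const sparse = new SparseTensor(values, indices, [3,3,2], 1);
*
* console.log(sparse.getSparseShape()); // [3, 3]
* console.log(sparse.getDenseShape());  // [2]
* ```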
*/
getSparseShape(): readonly number[];
/**
* Dense part of the shape of the tensor, i.e. the last D values of
* the shape, where D is the number of dense dimensions.
*/
getDenseShape(): readonly number[];
getShape(): readonly number[];
/**
* Creates a new sparse tensor with the same shape and sparsity pattern
* and the given value everywhere.
*
* @param value Constant value to set at every position
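*
* @example
*
* A minimal sketch (the concrete values are illustrative only):
* ```typescript
* const sparse = SparseTensor.fromDense(new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]));
*
* // Same shape and sparsity pattern, with every stored value set to 1
* const ones = sparse.constantLike(1);
* ```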
*/
constantLike(value: number): Tensor<DTpe>;
/**
* Not implemented yet
*/
singleConstant(value: number): Tensor<DTpe>;
cast<DTpe2 extends DType>(dtype: DTpe2): Tensor<DTpe2>;
delete(): void;
protected reshape_impl(shape: readonly number[], copy: boolean): Tensor<DTpe>;
exp(): Tensor<DTpe>;
log(): Tensor<DTpe>;
sqrt(): Tensor<DTpe>;
abs(): Tensor<DTpe>;
sin(): Tensor<DTpe>;
cos(): Tensor<DTpe>;
tan(): Tensor<DTpe>;
asin(): Tensor<DTpe>;
acos(): Tensor<DTpe>;
atan(): Tensor<DTpe>;
sinh(): Tensor<DTpe>;
cosh(): Tensor<DTpe>;
tanh(): Tensor<DTpe>;
asinh(): Tensor<DTpe>;
acosh(): Tensor<DTpe>;
atanh(): Tensor<DTpe>;
negate(): Tensor<DTpe>;
powerScalar(power: number, factor: number): Tensor<DTpe>;
sigmoid(): Tensor<DTpe>;
hardSigmoid(alpha: number, beta: number): Tensor<DTpe>;
sign(): Tensor<DTpe>;
addMultiplyScalar(factor: number, add: number): Tensor<DTpe>;
/**
* Calculates the matrix product. This tensor should have shape [M,N].
*
* Two cases are supported for sparse tensors:
* - If this tensor has one sparse dimension, the resulting tensor is
* a sparse tensor with the same number of non-zero entries
* - If this tensor has two sparse dimensions, the resulting tensor
* is dense.
*
* Right now this only supports sparse-dense matrix multiplication.
* It is supported on:
* - All backends if the sparse tensor has one sparse dimension
* - Only CPU/WASM if the sparse tensor has two sparse dimensions
*
* @param tensor Dense matrix to multiply with. Should have shape [N,O]
*
* @returns Tensor with shape [M,O]
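*
* @example
*
* A minimal sketch of sparse-dense matrix multiplication
* (the concrete values are illustrative only):
* ```typescript
* const sparse = SparseTensor.fromDense(new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]));
*
* // Dense matrix with shape [3,2]
* const b = new CPUTensor([3,2],[1,2,3,4,5,6]);
*
* // This sparse tensor has two sparse dimensions, so the result
* // is dense and has shape [3,2]
* const result = sparse.matMul(b);
* ```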
*/
matMul(tensor: Tensor<DTpe>): Tensor<DTpe>;
/**
* Concatenates the two tensors along the given axis.
*
* Note that at the moment, only concatenation along
* sparse dimensions is supported!
*
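* @example
*
* A minimal sketch of concatenation along a sparse dimension
* (the concrete values are illustrative only):
* ```typescript
* const a = SparseTensor.fromDense(new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]));
* const b = SparseTensor.fromDense(new CPUTensor([3,3],[5,0,0,0,6,0,0,0,7]));
*
* // Concatenated along the first (sparse) axis; the result has shape [6,3]
* const c = a.concat(b, 0);
* ```
*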
*/
concat(tensor: Tensor<DTpe>, axis: number): Tensor<DTpe>;
clip(min?: number, max?: number): Tensor<DTpe>;
/**
* Not implemented yet
*/
clipBackward(grad: Tensor<DTpe>, min?: number, max?: number): Tensor<DTpe>;
repeat(repeats: number[]): Tensor<DTpe>;
/**
* Not implemented yet
*/
expand(shape: readonly number[]): Tensor<DTpe>;
copy(): Tensor<DTpe>;
/**
* Not implemented yet
*/
gather(axis: number, indices: CPUTensor<'uint32'>): Tensor<DTpe>;
/**
* Not implemented yet
*/
setValues(values: Tensor<DTpe>, starts: number[]): Tensor<DTpe>;
floor(): Tensor<DTpe>;
ceil(): Tensor<DTpe>;
round(): Tensor<DTpe>;
/**
* Not implemented yet
*/
upsample(scales: number[]): Tensor<DTpe>;
/**
* Not implemented yet
*/
normalize(mean: Tensor<DTpe>, variance: Tensor<DTpe>, epsilon: number, scale: Tensor<DTpe>, bias: Tensor<DTpe>): Tensor<DTpe>;
/**
* Adds a second tensor, which can either be a sparse or a dense tensor:
* - If the second tensor is dense, it is assumed to have a rank at most
* equal to the number of dense dimensions of this tensor.
* If this is not the case, entries of the second tensor at positions
* that are zero in this tensor are simply ignored!
* This also means that broadcasting against this tensor is only
* supported on the dense dimensions!
* - If the second tensor is sparse, it is assumed that both tensors
* have exactly the same sparsity pattern!
*
* This is not supported on the WebGL backend yet.
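*
* @example
*
* A minimal sketch of adding two sparse tensors with the same sparsity
* pattern (the concrete values are illustrative only):
* ```typescript
* const a = SparseTensor.fromDense(new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]));
*
* // Both operands share the sparsity pattern, so the nonzero values
* // are added entrywise
* const doubled = a.add(a);
* ```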
*/
add(tensor: Tensor<DTpe>, alpha?: number, beta?: number): Tensor<DTpe>;
/**
* Subtracts a second tensor, which can either be a sparse or a dense tensor.
* The same restrictions as for {@link SparseTensor.add} apply!
*/
subtract(tensor: Tensor<DTpe>, alpha?: number, beta?: number): Tensor<DTpe>;
/**
* Multiplies a second tensor element wise, which can either be a sparse or a dense tensor.
* The same restrictions as for {@link SparseTensor.add} apply!
*/
multiply(tensor: Tensor<DTpe>, alpha?: number): Tensor<DTpe>;
/**
* Divides a second tensor element wise, which can either be a sparse or a dense tensor.
* The same restrictions as for {@link SparseTensor.add} apply!
*/
divide(tensor: Tensor<DTpe>, alpha?: number): Tensor<DTpe>;
add_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
subtract_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
multiply_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
divide_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
/**
* Not implemented yet
*/
power_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[]): Tensor<DTpe>;
/**
* Not implemented yet
*/
gemm_impl(b: Tensor<DTpe>, aTranspose: boolean, bTranspose: boolean, alpha: number, beta: number, C?: Tensor<DTpe>): Tensor<DTpe>;
/**
* Sums over sparse and/or dense dimensions according to the specified
* axes.
*
* - If summing only over dense dimensions, all backends are supported.
* - If summing over sparse dimensions, only CPU/WebGL are supported
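*
* @example
*
* A minimal sketch of summing over a sparse dimension
* (the concrete values are illustrative only):
* ```typescript
* const sparse = SparseTensor.fromDense(new CPUTensor([3,3],[1,0,0,0,2,0,0,3,4]));
*
* // Axis 1 is a sparse dimension here; the result has shape [3]
* const rowSums = sparse.sum(1);
* ```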
*/
sum(axes?: number | number[], keepDims?: boolean): Tensor<DTpe>;
protected sum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected sumSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected product_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected max_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected min_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceMean_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceMeanSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceLogSum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceLogSumExp_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
/**
* Not implemented yet
*/
protected conv_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[], activation: Activation, bias?: Tensor<DTpe>): Tensor<DTpe>;
/**
* Not implemented yet
*/
protected convTranspose_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[]): Tensor<DTpe>;
/**
* Not implemented yet
*/
protected pad_impl(pads: number[], mode: PadMode, value: number): Tensor<DTpe>;
/**
* Not implemented yet
*/
protected averagePool_impl(kernelShape: number[], pads: number[], strides: number[], includePad: boolean): Tensor<DTpe>;
/**
* Not implemented yet
*/
protected transpose_impl(permutation: number[]): Tensor<DTpe>;
/**
* Not implemented yet
*/
protected slice_impl(starts: number[], ends: number[], axes: number[], steps: number[]): Tensor<DTpe>;
}