@hoff97/tensor-js
PyTorch-like deep learning inference library
TypeScript
import Tensor, { Activation, DType, PadMode, TensorValues } from '../../types';
export declare class CPUTensor<DTpe extends DType = 'float32'> extends Tensor<DTpe> {
static range(start: number, limit: number, delta: number): CPUTensor<'float32'>;
/**
* Array of values of the tensor in contiguous layout
*/
values: TensorValues[DTpe];
/**
* Shape of the tensor
*/
shape: ReadonlyArray<number>;
/**
* Strides for all dimensions, i.e. the step size per dimension in the contiguous layout
*/
strides: ReadonlyArray<number>;
/**
* Total number of entries in the tensor
*/
size: number;
/**
* Whether this tensor has already been deleted
*/
deleted: boolean;
constructor(shape: ReadonlyArray<number>, values?: TensorValues[DTpe] | number[], dtype?: DTpe);
getValues(): Promise<TensorValues[DTpe]>;
getShape(): readonly number[];
constantLike(value: number): Tensor<DTpe>;
singleConstant(value: number): Tensor<DTpe>;
cast<DTpe2 extends DType>(dtype: DTpe2): Tensor<DTpe2>;
delete(): void;
copy(newShape?: number[]): Tensor<DTpe>;
get(index: number[] | number): number;
set(index: number[] | number, value: number): void;
setValues(values: Tensor<DTpe>, starts: number[]): Tensor<DTpe>;
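/**
 * Elementwise unary and scalar operations, applied independently to each entry
 */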
exp(): Tensor<DTpe>;
log(): Tensor<DTpe>;
sqrt(): Tensor<DTpe>;
abs(): Tensor<DTpe>;
sin(): Tensor<DTpe>;
cos(): Tensor<DTpe>;
tan(): Tensor<DTpe>;
asin(): Tensor<DTpe>;
acos(): Tensor<DTpe>;
atan(): Tensor<DTpe>;
sinh(): Tensor<DTpe>;
cosh(): Tensor<DTpe>;
tanh(): Tensor<DTpe>;
asinh(): Tensor<DTpe>;
acosh(): Tensor<DTpe>;
atanh(): Tensor<DTpe>;
floor(): Tensor<DTpe>;
ceil(): Tensor<DTpe>;
round(): Tensor<DTpe>;
negate(): Tensor<DTpe>;
powerScalar(power: number, factor: number): Tensor<DTpe>;
multiplyScalar(value: number): Tensor<DTpe>;
addScalar(value: number): Tensor<DTpe>;
addMultiplyScalar(factor: number, add: number): Tensor<DTpe>;
sign(): Tensor<DTpe>;
clip(min?: number, max?: number): Tensor<DTpe>;
clipBackward(grad: Tensor<DTpe>, min?: number, max?: number): Tensor<DTpe>;
sigmoid(): Tensor<DTpe>;
hardSigmoid(alpha: number, beta: number): Tensor<DTpe>;
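/**
 * Binary operation kernels computing into the given result shape; the `_impl`
 * suffix suggests these are implementation hooks called by the broadcasting
 * wrappers on the base Tensor class
 */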
add_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
subtract_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
multiply_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
divide_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
power_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[]): Tensor<DTpe>;
matMul(tensor: Tensor<DTpe>): Tensor<DTpe>;
gemm_impl(b: Tensor<DTpe>, aTranspose: boolean, bTranspose: boolean, alpha: number, beta: number, c?: Tensor<DTpe>): Tensor<DTpe>;
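/**
 * Reduction kernels over the given axes; keepDims controls whether reduced
 * axes are retained in the output shape
 */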
sum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
sumSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
product_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
max_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
min_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
reduceMean_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
reduceMeanSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
reduceLogSum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
reduceLogSumExp_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
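/**
 * Convolution, padding and pooling kernels
 */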
conv_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[], activation: Activation, bias?: Tensor<DTpe>): Tensor<DTpe>;
protected convTranspose_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[]): Tensor<DTpe>;
pad_impl(pads: number[], mode: PadMode, value: number): Tensor<DTpe>;
averagePool_impl(kernelShape: number[], pads: number[], strides: number[], includePad: boolean): Tensor<DTpe>;
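/**
 * Shape, layout and indexing operations
 */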
reshape_impl(shape: number[], copy: boolean): Tensor<DTpe>;
concat(tensor: Tensor<DTpe>, axis: number): Tensor<DTpe>;
transpose_impl(permutation: number[]): Tensor<DTpe>;
repeat(repeats: number[]): Tensor<DTpe>;
expand(shape: readonly number[]): Tensor<DTpe>;
gather(axis: number, indices: CPUTensor<'uint32'>): Tensor<DTpe>;
slice_impl(starts: number[], ends: number[], axes: number[], steps: number[]): Tensor<DTpe>;
upsample(scales: number[]): Tensor<DTpe>;
normalize(mean: Tensor<DTpe>, variance: Tensor<DTpe>, epsilon: number, scale: Tensor<DTpe>, bias: Tensor<DTpe>): Tensor<DTpe>;
}
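A minimal usage sketch based on the declarations above. The root import path, the availability of CPUTensor from the package entry point, and the illustrative values are assumptions; consult the package README for the actual exports.

// Usage sketch (assumptions: CPUTensor is re-exported from the package root,
// and the values shown are purely illustrative).
import { CPUTensor } from '@hoff97/tensor-js';

async function demo() {
  // 2x3 and 3x2 tensors from flat value arrays in contiguous layout;
  // dtype defaults to 'float32'
  const a = new CPUTensor([2, 3], [1, 2, 3, 4, 5, 6]);
  const b = new CPUTensor([3, 2], [1, 0, 0, 1, 1, 1]);

  // Matrix product followed by elementwise operations
  const c = a.matMul(b);
  const d = c.exp().multiplyScalar(0.5);

  console.log(d.getShape());        // [2, 2]
  console.log(await d.getValues()); // values in contiguous layout

  // CPUTensor exposes delete() for explicit cleanup
  a.delete();
  b.delete();
}

demo();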