@hoff97/tensor-js
PyTorch-like deep learning inference library
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
import { compareShapes, getSize } from './util/shape';
export const tensorValuesConstructor = {
float64: Float64Array,
float32: Float32Array,
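// float16 is backed by a Float32Array, since JavaScript has no native 16-bit float typed array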
float16: Float32Array,
int32: Int32Array,
int16: Int16Array,
int8: Int8Array,
uint32: Uint32Array,
uint16: Uint16Array,
uint8: Uint8Array,
};
/**
* Multi-dimensional array in the style of NumPy.
*
* A tensor is a multidimensional array. The number of
* dimensions is called the rank, and the sizes of all dimensions form the shape.
*
* @example
* ```typescript
* const a = [[1,2,3],[4,5,6]];
* ```
* Here a has rank 2 and shape [2,3].
*
* Tensors store values of a particular data type like floats or integers.
* The datatype can be accessed via the dtype property.
*
* Many operations can be done on tensors. For fast execution, three different
* backends exist:
* - CPU: Simple to use and works in any browser, but not particularly fast
* - WebAssembly: Reasonably fast and works in most modern browsers
* - WebGL: Very fast when a GPU is available, but comes with some restrictions
*/
export default class Tensor {
constructor(dtype) {
this.dtype = dtype;
}
/**
* Compares this tensor to another tensor.
*
* @param tensor Tensor to compare to
* @param epsilon Optional maximum difference between the tensors. If not specified the tensors have to be exactly equal
*
* @example
* ```typescript
* const a = new CPUTensor([2,2], [1,2,3,4]);
* const b = new CPUTensor([2,2], [1.1,2.1,2.9,4.05]);
* const c = new CPUTensor([4], [1,2,3,4]);
* a.compare(b, 0.5).then(equal => {
* //equal will be true
* });
*
* a.compare(b).then(equal => {
* //equal will be false
* });
*
* a.compare(c).then(equal => {
* //equal will be false since the shapes of the tensors do not match
* });
* ```
*/
compare(tensor, epsilon) {
return __awaiter(this, void 0, void 0, function* () {
if (!compareShapes(this.getShape(), tensor.getShape())) {
return false;
}
const arrA = yield this.getValues();
const arrB = yield tensor.getValues();
if (epsilon !== undefined) {
for (let i = 0; i < arrA.length; i += 1) {
if (Math.abs(arrA[i] - arrB[i]) > epsilon) {
return false;
}
}
}
else {
for (let i = 0; i < arrA.length; i += 1) {
if (arrA[i] !== arrB[i]) {
return false;
}
}
}
return true;
});
}
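/**
* Normalizes the axes argument of the reduction operations:
* undefined selects all axes, a single number becomes a one-element
* array, and negative axes are counted from the last dimension.
*/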
getAxes(axes) {
let ax;
const sh = this.getShape();
if (axes === undefined) {
ax = [];
for (let i = 0; i < sh.length; i++) {
ax.push(i);
}
}
else if (!(axes instanceof Array)) {
ax = [axes];
}
else {
ax = axes;
for (let i = 0; i < ax.length; i++) {
if (ax[i] < 0) {
ax[i] += this.getShape().length;
}
}
}
return ax;
}
/**
* Sums over the specified axis/axes.
*
* @param axes One or multiple axes to sum over. If not specified this will sum over all axes
* @param keepDims Whether the summation axes will be kept with size 1
*
* @example
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.sum(); //Will be [21]
* a.sum(0); //Will be [5,7,9]
* a.sum(1); //Will be [6,15]
* a.sum(0, true); //Will be [[5,7,9]]
* ```
*/
sum(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.sum_impl(ax, keepDims);
}
/**
* Sums the squared entries of the tensor over the specified axis/axes.
* This is equal to `a.multiply(a).sum(axes, keepDims)` but faster.
*
* @param axes One or multiple axes to sum over. If not specified this will sum over all axes
* @param keepDims Whether the summation axes will be kept with size 1
*
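* @example
* An illustrative sketch; the expected values follow from the description
* above and are assumed, not taken from the library's tests:
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.sumSquare();  //Should be [91]
* a.sumSquare(0); //Should be [17,29,45]
* ```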
*/
sumSquare(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.sumSquare_impl(ax, keepDims);
}
/**
* Takes the product over specified axis/axes.
*
* @param axes One or multiple axes to take the product over. If not specified this will be all axes
* @param keepDims Whether the product axes will be kept with size 1
*
* @example
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.product(); //Will be [720]
* a.product(0); //Will be [4,10,18]
* a.product(1); //Will be [6,120]
* a.product(0, true); //Will be [[4,10,18]]
* ```
*/
product(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.product_impl(ax, keepDims);
}
/**
* Takes the maximum over specified axis/axes.
*
* @param axes One or multiple axes to take the maximum over. If not specified this will be all axes
* @param keepDims Whether the maximum axes will be kept with size 1
*
* @example
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.max(); //Will be [6]
* a.max(0); //Will be [4,5,6]
* a.max(1); //Will be [3,6]
* a.max(0, true); //Will be [[4,5,6]]
* ```
*/
max(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.max_impl(ax, keepDims);
}
/**
* Takes the minimum over specified axis/axes.
*
* @param axes One or multiple axes to take the minimum over. If not specified this will be all axes
* @param keepDims Whether the minimum axes will be kept with size 1
*
* @example
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.min(); //Will be [1]
* a.min(0); //Will be [1,2,3]
* a.min(1); //Will be [1,4]
* a.min(0, true); //Will be [[1,2,3]]
* ```
*/
min(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.min_impl(ax, keepDims);
}
/**
* Takes the mean over the specified axis/axes.
* This is equal to `a.sum(axes, keepDims).divide(sumSize)` (where sumSize is the number
* of entries in the summation axes) but faster.
*
* @param axes One or multiple axes to take the mean over. If not specified this will take the mean over all axes
* @param keepDims Whether the mean axes will be kept with size 1
*
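* @example
* An illustrative sketch; the expected values follow from the description
* above and are assumed, not taken from the library's tests:
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.reduceMean();  //Should be [3.5]
* a.reduceMean(0); //Should be [2.5,3.5,4.5]
* ```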
*/
reduceMean(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.reduceMean_impl(ax, keepDims);
}
/**
* Takes the log of the sum over the specified axis/axes.
* This is equal to `a.sum(axes, keepDims).log()` but faster.
*
* Note that this can only be called on tensors with a float data type (float64, float32, float16)
*
* @param axes One or multiple axes to sum over. If not specified this will sum over all axes
* @param keepDims Whether the summation axes will be kept with size 1
*
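* @example
* An illustrative sketch; values are rounded and assumed from the description above:
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.reduceLogSum();  //Should be approximately [3.045] (the log of 21)
* a.reduceLogSum(0); //Should be approximately [1.609,1.946,2.197]
* ```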
*/
reduceLogSum(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.reduceLogSum_impl(ax.sort((a, b) => a - b), keepDims);
}
/**
* Takes the log of the sum of the exponentials over the specified axis/axes.
* This is equal to `a.exp().sum(axes, keepDims).log()` but faster.
*
* Note that this can only be called on tensors with a float data type (float64, float32, float16)
*
* @param axes One or multiple axes to sum over. If not specified this will sum over all axes
* @param keepDims Whether the summation axes will be kept with size 1
*
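* @example
* An illustrative sketch; the value is rounded and assumed from the description above:
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.reduceLogSumExp(); //Should be approximately [6.456]
* ```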
*/
reduceLogSumExp(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.reduceLogSumExp_impl(ax, keepDims);
}
/**
* Takes the mean of the squared entries of the tensor over the specified axis/axes.
* This is equal to `a.multiply(a).sum(axes, keepDims).divide(sumSize)` (where sumSize is the number
* of entries in the summation axes) but faster.
*
* @param axes One or multiple axes to take the mean over. If not specified this will take the mean over all axes
* @param keepDims Whether the mean axes will be kept with size 1
*
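* @example
* An illustrative sketch; the expected values follow from the description
* above and are assumed, not taken from the library's tests:
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.reduceMeanSquare();  //Should be approximately [15.167] (91/6)
* a.reduceMeanSquare(0); //Should be [8.5,14.5,22.5]
* ```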
*/
reduceMeanSquare(axes, keepDims) {
const ax = this.getAxes(axes);
keepDims = keepDims || false;
return this.reduceMeanSquare_impl(ax, keepDims);
}
/**
* Convolves this tensor with the specified kernel.
*
* This tensor should have shape [N,C,D1,D2,...] where D1,D2,... are the spatial dimensions.
*
* Behaves according to https://github.com/onnx/onnx/blob/master/docs/Operators.md#Conv
*
* @param kernel Convolution kernel with shape [M,C/G,K1,K2] where G is the group parameter
* @param bias Optional bias to add to the result with shape [M]
* @param dilations Per axis dilations for the spatial dimension. Defaults to 1 for all axes
* @param group Group parameter
* @param pads Padding to add to the input for each spatial dimension. Defaults to 0 for all axes
* @param strides Convolution stride for each spatial dimension. Defaults to 1 for all axes
* @param activation Optional activation to apply. Defaults to the identity (so no activation)
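*
* @example
* A minimal 2D sketch; the expected values follow the ONNX Conv semantics
* linked above and are assumed, not taken from the library's tests:
* ```typescript
* const input = new CPUTensor([1,1,3,3], [1,2,3,4,5,6,7,8,9]);
* const kernel = new CPUTensor([1,1,2,2], [1,1,1,1]);
*
* input.conv(kernel);
* //Result should have shape [1,1,2,2] with values
* // [[12,16],
* //  [24,28]]
* ```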
*/
conv(kernel, bias, dilations, group, pads, strides, activation) {
const sh = this.getShape();
const dataRank = sh.length - 2;
dilations = dilations || new Array(dataRank).fill(1);
group = group || 1;
pads = pads || new Array(dataRank * 2).fill(0);
strides = strides || new Array(dataRank).fill(1);
if (activation === undefined) {
activation = 'id';
}
return this.conv_impl(kernel, dilations, group, pads, strides, activation, bias);
}
/**
* Calculates the transpose convolution
*
* This tensor should have shape [N,C,D1,D2,...] where D1,D2,... are the spatial dimensions.
*
* @param kernel Convolution kernel with shape [M,C/G,K1,K2] where G is the group parameter
* @param dilations Per axis dilations for the spatial dimension. Defaults to 1 for all axes
* @param group Group parameter
* @param pads Padding to add to the input for each spatial dimension. Defaults to 0 for all axes
* @param strides Convolution stride for each spatial dimension. Defaults to 1 for all axes
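*
* @example
* A minimal 2D sketch; the expected values follow the usual transpose
* convolution definition and are assumed, not taken from the library's tests:
* ```typescript
* const input = new CPUTensor([1,1,2,2], [1,2,3,4]);
* const kernel = new CPUTensor([1,1,2,2], [1,1,1,1]);
*
* input.convTranspose(kernel);
* //Result should have shape [1,1,3,3] with values
* // [[1,3,2],
* //  [4,10,6],
* //  [3,7,4]]
* ```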
*/
convTranspose(kernel, dilations, group, pads, strides) {
const sh = this.getShape();
const dataRank = sh.length - 2;
dilations = dilations || new Array(dataRank).fill(1);
group = group || 1;
pads = pads || new Array(dataRank * 2).fill(0);
strides = strides || new Array(dataRank).fill(1);
return this.convTranspose_impl(kernel, dilations, group, pads, strides);
}
/**
* Pads the input according to the padding mode. The input has shape [D1,D2,..]
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[1,2,3,4]);
* a.pad([1,1,1,1],'constant',5);
* //Result will be:
* // [[5,5,5,5],
* // [5,1,2,5],
* // [5,3,4,5],
* // [5,5,5,5]]
* a.pad([1,1,1,1],'edge');
* //Result will be:
* // [[1,1,2,2],
* // [1,1,2,2],
* // [3,3,4,4],
* // [3,3,4,4]]
*
* a.pad([2,2,2,2],'reflect');
* //Result will be:
* // [[4,3,3,4,4,3],
* // [2,1,1,2,2,1],
* // [2,1,1,2,2,1],
* // [4,3,3,4,4,3],
* // [4,3,3,4,4,3],
* // [2,1,1,2,2,1]]
* ```
*
* @param pads Padding size of each input. Specified as [startpad_D1,startpad_D2,...,startpad_DN,endpad_D1,endpad_D2,...]
* @param mode Padding mode. One of 'constant', 'edge', 'reflect'. Defaults to 'constant'
* @param value Value for constant padding. Defaults to 0.0
*/
pad(pads, mode, value) {
if (mode === undefined) {
mode = 'constant';
}
if (value === undefined) {
value = 0;
}
return this.pad_impl(pads, mode, value);
}
/**
* Performs average pooling over the spatial dimensions of this tensor with
* shape [N,C,D1,D2,..]
* @param kernelShape Size of the average pooling dimension
* @param pads Padding of the input specified as [startpad_D1,startpad_D2,...,startpad_DN,endpad_D1,endpad_D2,...]
* Padding value will be 0. Defaults to 0 for all axes
* @param strides Stride size of the average pooling kernel. Defaults to 1 for all axes
* @param includePad Whether padded values should be included in the average (or masked out). Defaults to false
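*
* @example
* A minimal sketch; the expected value is assumed from the description above:
* ```typescript
* const a = new CPUTensor([1,1,2,2], [1,2,3,4]);
*
* a.averagePool([2,2]);
* //Result should have shape [1,1,1,1] and value 2.5
* ```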
*/
averagePool(kernelShape, pads, strides, includePad) {
const sh = this.getShape();
const dataRank = sh.length - 2;
pads = pads || new Array(dataRank * 2).fill(0);
strides = strides || new Array(dataRank).fill(1);
includePad = includePad || false;
return this.averagePool_impl(kernelShape, pads, strides, includePad);
}
/**
* Reshape the tensor to the specified shape
*
* At most one value in the shape can be -1, which will be replaced by the inferred size for this dimension.
*
* @param shape New shape of the tensor
* @param copy Whether the tensor values should be copied. Only has an effect on GPU tensors
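*
* @example
* An illustrative sketch:
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
*
* a.reshape([3,2]);  //Shape will be [3,2]
* a.reshape([-1,2]); //The -1 is inferred, shape will be [3,2]
* ```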
*/
reshape(shape, copy) {
let shSize = 1;
let negIndex = -1;
for (let i = 0; i < shape.length; i++) {
if (shape[i] === -1) {
negIndex = i;
}
else {
shSize *= shape[i];
}
}
if (copy === undefined) {
copy = true;
}
if (negIndex !== -1) {
const currShape = this.getShape();
const currSize = getSize(currShape);
const _shape = [...shape];
_shape[negIndex] = currSize / shSize;
return this.reshape_impl(_shape, copy);
}
return this.reshape_impl(shape, copy);
}
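/**
* Aligns two shapes for broadcasting: the shorter shape is padded with
* leading 1s and the result shape takes the maximum size along each axis.
*/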
alignShapes(shape1, shape2) {
if (compareShapes(shape1, shape2)) {
return [shape1, shape2, shape1];
}
if (shape1.length < shape2.length) {
shape1 = [...shape1];
const prepend = shape2.length - shape1.length;
shape1.unshift(...new Array(prepend).fill(1));
}
else if (shape2.length < shape1.length) {
shape2 = [...shape2];
const prepend = shape1.length - shape2.length;
shape2.unshift(...new Array(prepend).fill(1));
}
const resultShape = new Array(shape1.length).fill(1);
for (let i = 0; i < shape1.length; i++) {
resultShape[i] = Math.max(shape1[i], shape2[i]);
}
return [shape1, shape2, resultShape];
}
/**
* Align the shapes of this tensor and the given tensor according to
* the broadcasting rules:
* https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
*
* @param tensor Tensor of which the shapes should be aligned
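*
* @example
* An illustrative sketch; the shapes follow from the broadcasting rules linked above:
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
* const b = new CPUTensor([3], [1,2,3]);
*
* const [alignedA, alignedB, resultShape] = a.alignTensor(b);
* //alignedB will have shape [1,3] and resultShape will be [2,3]
* ```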
*/
alignTensor(tensor) {
let thisShape = this.getShape();
let thatShape = tensor.getShape();
if (compareShapes(thisShape, thatShape)) {
return [this, tensor, thisShape];
}
// eslint-disable-next-line @typescript-eslint/no-this-alias
let th = this;
if (thisShape.length < thatShape.length) {
thisShape = [...thisShape];
const prepend = thatShape.length - thisShape.length;
thisShape.unshift(...new Array(prepend).fill(1));
th = this.reshape(thisShape, false);
}
else if (thatShape.length < thisShape.length) {
thatShape = [...thatShape];
const prepend = thisShape.length - thatShape.length;
thatShape.unshift(...new Array(prepend).fill(1));
tensor = tensor.reshape(thatShape, false);
}
const resultShape = new Array(thisShape.length).fill(1);
for (let i = 0; i < thisShape.length; i++) {
resultShape[i] = Math.max(thisShape[i], thatShape[i]);
}
return [th, tensor, resultShape];
}
/**
* Adds two tensors. Supports broadcasting
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[1,2,3,4]);
* const b = new CPUTensor([2,2],[5,6,7,8]);
* const c = new CPUTensor([1],[2]);
*
* a.add(b);
* //Will be
* // [[6,8],
* // [10,12]]
*
* a.add(c);
* //Will be
* // [[3,4],
* // [5,6]]
* ```
*/
add(tensor, alpha, beta) {
if (alpha === undefined) {
alpha = 1;
}
if (beta === undefined) {
beta = 1;
}
const [th, tens, resultShape] = this.alignTensor(tensor);
return this.add_impl(th, tens, resultShape, alpha, beta);
}
/**
* Subtracts two tensors. Supports broadcasting
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[5,6,7,8]);
* const b = new CPUTensor([2,2],[1,2,3,4]);
* const c = new CPUTensor([1],[2]);
*
* a.subtract(b);
* //Will be
* // [[4,4],
* // [4,4]]
*
* a.subtract(c);
* //Will be
* // [[3,4],
* // [5,6]]
* ```
*/
subtract(tensor, alpha, beta) {
if (alpha === undefined) {
alpha = 1;
}
if (beta === undefined) {
beta = 1;
}
const [th, tens, resultShape] = this.alignTensor(tensor);
return this.subtract_impl(th, tens, resultShape, alpha, beta);
}
/**
* Multiplies two tensors. Supports broadcasting
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[1,2,3,4]);
* const b = new CPUTensor([2,2],[5,6,7,8]);
* const c = new CPUTensor([1],[2]);
*
* a.multiply(b);
* //Will be
* // [[5,12],
* // [21,32]]
*
* a.multiply(c);
* //Will be
* // [[2,4],
* // [6,8]]
* ```
*/
multiply(tensor, alpha) {
if (alpha === undefined) {
alpha = 1;
}
const [th, tens, resultShape] = this.alignTensor(tensor);
return this.multiply_impl(th, tens, resultShape, alpha);
}
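/**
* Multiplies every entry of the tensor by the given scalar value.
*/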
multiplyScalar(value) {
return this.addMultiplyScalar(value, 0);
}
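/**
* Adds the given scalar value to every entry of the tensor.
*/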
addScalar(value) {
return this.addMultiplyScalar(1, value);
}
/**
* Divides two tensors. Supports broadcasting
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[5,6,7,8]);
* const b = new CPUTensor([2,2],[1,2,3,4]);
* const c = new CPUTensor([1],[2]);
*
* a.divide(b);
* //Will be
* // [[5,3],
* // [2.333,2]]
*
* a.divide(c);
* //Will be
* // [[2.5,3],
* // [3.5,4]]
* ```
*/
divide(tensor, alpha) {
if (alpha === undefined) {
alpha = 1;
}
const [th, tens, resultShape] = this.alignTensor(tensor);
return this.divide_impl(th, tens, resultShape, alpha);
}
/**
* Takes the positionwise power. Supports broadcasting
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[5,6,7,8]);
* const b = new CPUTensor([2,2],[2,3,2,3]);
* const c = new CPUTensor([1],[2]);
*
* a.power(b);
* //Will be
* // [[25,216],
* // [49,512]]
*
* a.power(c);
* //Will be
* // [[25,36],
* // [49,64]]
* ```
*/
power(tensor) {
const [th, tens, resultShape] = this.alignTensor(tensor);
return this.power_impl(th, tens, resultShape);
}
/**
* Transposes the tensor according to the given permutation
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[5,6,7,8]);
*
* a.transpose();
* //Will be
* // [[5,7],
* // [6,8]]
* ```
* @param permutation Permutation for the axes. Default is the reverse axis order
*/
transpose(permutation) {
if (permutation === undefined) {
const shape = this.getShape();
const rank = shape.length;
permutation = [];
for (let i = 0; i < rank; i++) {
permutation.push(rank - i - 1);
}
}
return this.transpose_impl(permutation);
}
/**
* Takes the softmax along the given axis
* https://en.wikipedia.org/wiki/Softmax_function
*
* Note that this can only be called on tensors with a float data type (float64, float32, float16)
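*
* @example
* An illustrative sketch; values are rounded and assumed from the softmax definition:
* ```typescript
* const a = new CPUTensor([2,2], [1,2,3,4]);
*
* a.softmax(1);
* //Result should be approximately
* // [[0.269,0.731],
* //  [0.269,0.731]]
* ```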
*/
softmax(axis) {
const max = this.max(axis, true);
const normalized = this.subtract(max);
const exp = normalized.exp();
const sum = exp.sum(axis, true);
const result = exp.divide(sum);
max.delete();
normalized.delete();
exp.delete();
sum.delete();
return result;
}
/**
* Calculates the general matrix product.
* https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3
*
* A and B can have batch dimensions. Their last two dimensions should
* correspond to the dimensions for the matrix product
*
* @param b Second matrix for the matrix product
* @param aTranspose If the last two dimensions of a are transposed. Defaults to false
* @param bTranspose If the last two dimensions of b are transposed. Defaults to false
* @param alpha Alpha parameter. Defaults to 1.0
* @param c Optional tensor to add to the result.
* @param beta Beta parameter, only used if c is specified. Defaults to 1.0
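*
* @example
* A minimal sketch; without transposes, c, alpha or beta this is a plain
* matrix product (values assumed, not taken from the library's tests):
* ```typescript
* const a = new CPUTensor([2,3], [1,2,3,4,5,6]);
* const b = new CPUTensor([3,2], [1,2,3,4,5,6]);
*
* a.gemm(b);
* //Should be
* // [[22,28],
* //  [49,64]]
* ```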
*/
gemm(b, aTranspose, bTranspose, alpha, c, beta) {
aTranspose = aTranspose || false;
bTranspose = bTranspose || false;
alpha = alpha !== undefined ? alpha : 1;
beta = beta !== undefined ? beta : 1;
if (c !== undefined) {
const aShape = this.getShape();
let cShape = c.getShape();
const aRank = aShape.length;
const cRank = cShape.length;
if (aRank > cRank) {
cShape = [...new Array(aRank - cRank).fill(1), ...cShape];
c = c.reshape(cShape, false);
}
}
return this.gemm_impl(b, aTranspose, bTranspose, alpha, beta, c);
}
/**
* Takes a slice of the tensor along the specified axes.
*
* @example
* ```typescript
* const a = new CPUTensor([2,2],[5,6,7,8]);
*
* a.slice([0],[1],[0]);
* //Will be
* // [[5,6]]
*
* a.slice([0],[1],[1]);
* //Will be
* // [[5],
* // [7]]
* ```
*
* @param starts Start of the slice for each axis
* @param ends End of the slice for each axis - Exclusive (the end index will not be included in the slice)
* @param axes Axes to slice. Defaults to all axes
* @param steps Step size of the slice for each axis. Defaults to 1 for all axes
*/
slice(starts, ends, axes, steps) {
const shape = this.getShape();
const rank = shape.length;
if (axes === undefined) {
axes = [];
for (let i = 0; i < rank; i++) {
axes.push(i);
}
}
else {
axes = axes.map(x => (x < 0 ? x + rank : x));
}
if (steps === undefined) {
steps = new Array(rank).fill(1);
}
starts = [...starts];
ends = [...ends];
for (let i = 0; i < axes.length; i++) {
const sh = shape[axes[i]];
if (starts[i] < 0) {
starts[i] += sh;
}
else if (starts[i] >= sh) {
if (steps[i] > 0) {
starts[i] = sh;
}
else {
starts[i] = sh - 1;
}
}
if (ends[i] < 0) {
ends[i] += sh;
}
else if (ends[i] >= sh) {
ends[i] = sh;
}
}
return this.slice_impl(starts, ends, axes, steps);
}
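/**
* Removes all axes of size 1 from the shape of the tensor.
*
* @example
* An illustrative sketch:
* ```typescript
* const a = new CPUTensor([1,2,1,3], [1,2,3,4,5,6]);
*
* a.squeeze(); //Shape will be [2,3]
* ```
*/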
squeeze() {
const sh = this.getShape();
const newShape = [];
for (const a of sh) {
if (a !== 1) {
newShape.push(a);
}
}
return this.reshape(newShape);
}
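/**
* Flattens the tensor into a matrix: the dimensions before the given axis are
* collapsed into the first dimension, the remaining ones into the second.
* The axis defaults to 1; negative axes are counted from the last dimension.
*
* @example
* An illustrative sketch:
* ```typescript
* const a = new CPUTensor([2,3,4], new Array(24).fill(1));
*
* a.flatten();  //Shape will be [2,12]
* a.flatten(2); //Shape will be [6,4]
* ```
*/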
flatten(axis) {
if (axis === undefined) {
axis = 1;
}
const sh = this.getShape();
if (axis < 0) {
axis += sh.length;
}
const newShape = [
getSize(sh.slice(0, axis), 1),
getSize(sh.slice(axis), 1),
];
return this.reshape(newShape);
}
}
//# sourceMappingURL=types.js.map