gradiatorjs
GradiatorJS is a lightweight, from-scratch autodiff engine and neural network library written in TypeScript. It features an automatic differentiation engine that builds a computation graph to enable backpropagation through dynamic network architectures.
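/**
 * Core value/tensor type of the autodiff engine.
 * @example
 * // Illustrative sketch of assumed usage, based on the declarations below
 * // (the `data` setter is typed `any`; nested-array input is an assumption):
 * const v = new Val([2, 2]);            // 2x2 tensor, zero-filled
 * v.data = [[1, 2], [3, 4]];
 * const w = new Val([2, 2]).randn();    // Gaussian-initialized values
 * console.log(v.shape, v.size, v.dim);  // [2, 2] 4 2
 */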
declare class Val {
private _data;
grad: Float64Array;
_backward: () => void;
_prev: Set<Val>;
shape: number[];
size: number;
constructor(shape: number[], value?: number);
backward(): void;
set data(a: any);
private createArr;
private flattenND;
get data(): any;
private calculateSizeFromShape;
private calculateShape;
private zeros;
get dim(): number;
private filled;
clone(): Val;
get T(): Val;
reshape(newShape: number[]): Val;
randn(): Val;
gradVal(): Val;
}
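/**
 * Accuracy metrics computed from predictions and targets.
 * @example
 * // Sketch; shapes are assumptions inferred from the loss docs below
 * // ([batch, 1] sigmoid outputs for binary, [batch, numClasses] one-hot for multi-class):
 * const acc = calcBinaryAccuracy(y_pred, y_true, 0.5); // y_pred, y_true: hypothetical Vals
 */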
declare function calcBinaryAccuracy(y_pred_val: Val, y_true_val: Val, threshold?: number): number;
declare function calcMultiClassAccuracy(y_pred_val: Val, y_true_val: Val): number;
declare const accuracy_calcBinaryAccuracy: typeof calcBinaryAccuracy;
declare const accuracy_calcMultiClassAccuracy: typeof calcMultiClassAccuracy;
declare namespace accuracy {
export { accuracy_calcBinaryAccuracy as calcBinaryAccuracy, accuracy_calcMultiClassAccuracy as calcMultiClassAccuracy };
}
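/**
 * Activation functions; each takes and returns a Val so gradients flow through
 * the graph (softmax presumably normalizes over the class axis).
 * @example
 * // Sketch: use standalone or pass to a layer (see Dense below).
 * const A = relu(Z);                      // Z: hypothetical pre-activation Val
 * const layer = new Dense(4, 2, sigmoid); // as a layer's activation
 */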
declare function relu(Z: Val): Val;
declare function sigmoid(Z: Val): Val;
declare function tanh(Z: Val): Val;
declare function softmax(Z: Val): Val;
declare const activations_relu: typeof relu;
declare const activations_sigmoid: typeof sigmoid;
declare const activations_softmax: typeof softmax;
declare const activations_tanh: typeof tanh;
declare namespace activations {
export { activations_relu as relu, activations_sigmoid as sigmoid, activations_softmax as softmax, activations_tanh as tanh };
}
declare class Module {
last_Z: Val | null;
last_A: Val | null;
parameters(): Val[];
zeroGrad(): void;
forward(X: Val): Val;
toJSON(): any;
}
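/**
 * @example
 * // Sketch of assumed usage: maps nin inputs to nout outputs, with an
 * // optional activation applied to the result.
 * const fc = new Dense(784, 128, relu);
 * const X = new Val([32, 784]).randn(); // hypothetical batch of 32
 * const A = fc.forward(X);              // expected shape: [32, 128]
 */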
declare class Dense extends Module {
W: Val;
B: Val;
activation?: (t: Val) => Val;
readonly nin: number;
readonly nout: number;
constructor(nin: number, nout: number, activation?: (t: Val) => Val);
forward(X_input: Val): Val;
toJSON(): any;
}
declare class Conv extends Module {
kernel: Val;
biases: Val;
stride: number;
padding: number;
activation?: (t: Val) => Val;
readonly in_channels: number;
readonly out_channels: number;
readonly kernel_size: number;
constructor(in_channels: number, out_channels: number, kernel_size: number, stride: number, padding: number, activation?: (t: Val) => Val);
forward(X_input: Val): Val;
toJSON(): any;
}
declare class Flatten extends Module {
forward(X: Val): Val;
toJSON(): any;
}
declare class MaxPool2D extends Module {
pool_size: number;
stride: number;
constructor(pool_size: number, stride: number);
forward(X: Val): Val;
toJSON(): any;
}
type layers_Conv = Conv;
declare const layers_Conv: typeof Conv;
type layers_Dense = Dense;
declare const layers_Dense: typeof Dense;
type layers_Flatten = Flatten;
declare const layers_Flatten: typeof Flatten;
type layers_MaxPool2D = MaxPool2D;
declare const layers_MaxPool2D: typeof MaxPool2D;
type layers_Module = Module;
declare const layers_Module: typeof Module;
declare namespace layers {
export { layers_Conv as Conv, layers_Dense as Dense, layers_Flatten as Flatten, layers_MaxPool2D as MaxPool2D, layers_Module as Module };
}
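/**
 * Container that chains Modules; forward() feeds each layer's output into the next.
 * @example
 * // Sketch of a small CNN, assuming the NHWC input layout documented for conv2d below:
 * const net = new Sequential(
 *   new Conv(1, 8, 3, 1, 1, relu), // in_channels, out_channels, kernel_size, stride, padding
 *   new MaxPool2D(2, 2),
 *   new Flatten(),
 *   new Dense(8 * 14 * 14, 10)     // sizes assume 28x28 single-channel inputs
 * );
 * const logits = net.forward(new Val([32, 28, 28, 1]).randn());
 */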
declare class Sequential extends Module {
layers: Module[];
constructor(...layers: Module[]);
forward(X: Val): Val;
getLayerOutputs(X: Val): {
Z: Val | null;
A: Val | null;
}[];
toJSON(): any;
}
type model_Sequential = Sequential;
declare const model_Sequential: typeof Sequential;
declare namespace model {
export { model_Sequential as Sequential };
}
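/**
 * Loss functions; each returns a Val so backward() can be called on the result.
 * @example
 * // Sketch (y_pred, y_true: hypothetical Vals of matching shape):
 * const l = meanSquaredErrorLoss(y_pred, y_true);
 * l.backward(); // gradients accumulate into upstream .grad arrays
 */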
declare function meanSquaredErrorLoss(y_pred: Val, y_true: Val): Val;
declare function crossEntropyLoss_binary(y_pred: Val, y_true: Val, e?: number): Val;
declare function crossEntropyLoss_categorical(y_pred: Val, y_true: Val): Val;
/**
 * A combined softmax and categorical cross-entropy loss function. Fusing the two avoids
 * backpropagating through a log function: if the softmax outputs a probability P that is
 * extremely close to zero, log(P) is -Infinity and its gradient (1/P) explodes.
 * The backward pass (dL/dZ) here simplifies to: predicted_probabilities - true_labels.
 * @param logits raw, unnormalized output scores from the final dense layer (before softmax activation). [batch, numClasses]
 * @param y_true true labels, one-hot encoded. [batch, numClasses]
 */
declare function crossEntropyLoss_softmax(logits: Val, y_true: Val): Val;
declare const loss_crossEntropyLoss_binary: typeof crossEntropyLoss_binary;
declare const loss_crossEntropyLoss_categorical: typeof crossEntropyLoss_categorical;
declare const loss_crossEntropyLoss_softmax: typeof crossEntropyLoss_softmax;
declare const loss_meanSquaredErrorLoss: typeof meanSquaredErrorLoss;
declare namespace loss {
export { loss_crossEntropyLoss_binary as crossEntropyLoss_binary, loss_crossEntropyLoss_categorical as crossEntropyLoss_categorical, loss_crossEntropyLoss_softmax as crossEntropyLoss_softmax, loss_meanSquaredErrorLoss as meanSquaredErrorLoss };
}
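/**
 * Differentiable tensor ops; each call extends the computation graph.
 * @example
 * // Minimal autodiff sketch (assumes the optional constructor `value` fills
 * // the tensor): c = a*b + a, so dc/da = b + 1 and dc/db = a.
 * const a = new Val([1], 2);
 * const b = new Val([1], 3);
 * const c = add(mul(a, b), a);
 * c.backward();
 * console.log(a.grad, b.grad); // expected: [4] and [2]
 */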
declare function add(t1: Val | number, t2: Val | number): Val;
declare function sub(t1: Val | number, t2: Val | number): Val;
declare function mul(t1: Val | number, t2: Val | number): Val;
declare function dot(t1: Val, t2: Val): Val;
declare function pow(t: Val, num: number): Val;
declare function div(t: Val, num: number): Val;
declare function divElementWise(t1: Val, t2: Val): Val;
declare function negate(t: Val): Val;
declare function abs(t: Val): Val;
declare function exp(t: Val): Val;
declare function log(t: Val): Val;
declare function sum(t: Val, axis?: number, keepdims?: boolean): Val;
declare function mean(t: Val, axis?: number, keepdims?: boolean): Val;
/**
 * @param X input: [batch_size, n_height, n_width, n_channels]
 * @param F filter: [c_outputchannels, filter_size, filter_size, n_channels]
 * @param st stride (default: 1)
 * @param pad padding (default: 0)
 */
declare function conv2d(X: Val, F: Val, st?: number, pad?: number): Val;
declare function maxPool2d(X: Val, pool_size: number, stride: number): Val;
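/**
 * @example
 * // Shape sketch for conv2d/maxPool2d using the NHWC layout documented above:
 * const X = new Val([1, 28, 28, 3]).randn(); // [batch, height, width, channels]
 * const F = new Val([8, 3, 3, 3]).randn();   // [out_channels, k, k, in_channels]
 * const Y = conv2d(X, F, 1, 1);              // expected: [1, 28, 28, 8]
 * const P = maxPool2d(Y, 2, 2);              // expected: [1, 14, 14, 8]
 */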
declare const ops_abs: typeof abs;
declare const ops_add: typeof add;
declare const ops_conv2d: typeof conv2d;
declare const ops_div: typeof div;
declare const ops_divElementWise: typeof divElementWise;
declare const ops_dot: typeof dot;
declare const ops_exp: typeof exp;
declare const ops_log: typeof log;
declare const ops_maxPool2d: typeof maxPool2d;
declare const ops_mean: typeof mean;
declare const ops_mul: typeof mul;
declare const ops_negate: typeof negate;
declare const ops_pow: typeof pow;
declare const ops_sub: typeof sub;
declare const ops_sum: typeof sum;
declare namespace ops {
export { ops_abs as abs, ops_add as add, ops_conv2d as conv2d, ops_div as div, ops_divElementWise as divElementWise, ops_dot as dot, ops_exp as exp, ops_log as log, ops_maxPool2d as maxPool2d, ops_mean as mean, ops_mul as mul, ops_negate as negate, ops_pow as pow, ops_sub as sub, ops_sum as sum };
}
type LayerType = 'dense' | 'conv' | 'flatten' | 'maxpool';
type ActivationType = 'relu' | 'sigmoid' | 'tanh' | 'softmax';
interface MinMaxInfo {
minv: number;
maxv: number;
dv: number;
}
interface NetworkParams {
loss_fn: (Y_pred: Val, Y_true: Val) => Val;
l_rate: number;
epochs: number;
batch_size: number;
multiClass: boolean;
}
interface TrainingProgress {
epoch: number;
batch_idx: number;
loss: number;
accuracy: number;
iterTime: number;
visData: {
sampleX: Val;
sampleY_label: number;
layerOutputs: {
Z: Val | null;
A: Val | null;
}[];
};
}
interface Messenger {
postMessage(data: any, transfer?: Transferable[]): void;
}
type types_ActivationType = ActivationType;
type types_LayerType = LayerType;
type types_Messenger = Messenger;
type types_MinMaxInfo = MinMaxInfo;
type types_NetworkParams = NetworkParams;
type types_TrainingProgress = TrainingProgress;
declare namespace types {
export type { types_ActivationType as ActivationType, types_LayerType as LayerType, types_Messenger as Messenger, types_MinMaxInfo as MinMaxInfo, types_NetworkParams as NetworkParams, types_TrainingProgress as TrainingProgress };
}
type trainingState = 'IDLE' | 'TRAINING' | 'PAUSED' | 'STOPPING';
declare const getIsTraining: () => boolean;
declare const getIsPaused: () => boolean;
declare const getStopTraining: () => boolean;
declare function getTrainingContext(): {
model: Sequential | null;
X_train: Val | null;
Y_train: Val | null;
params: NetworkParams | null;
currentEpoch: number;
batchGenerator: Generator<any, void, unknown> | null;
iteration: number;
};
declare function startTraining(): void;
declare function requestStopTraining(): void;
declare function endTraining(): void;
declare function requestPause(): void;
declare function requestResume(): void;
declare function setTrainingState(newState: trainingState): void;
declare function setupTrainingContext(model: Sequential, x: Val, y: Val, params: NetworkParams): void;
declare function advanceEpoch(): boolean;
declare const state_management_advanceEpoch: typeof advanceEpoch;
declare const state_management_endTraining: typeof endTraining;
declare const state_management_getIsPaused: typeof getIsPaused;
declare const state_management_getIsTraining: typeof getIsTraining;
declare const state_management_getStopTraining: typeof getStopTraining;
declare const state_management_getTrainingContext: typeof getTrainingContext;
declare const state_management_requestPause: typeof requestPause;
declare const state_management_requestResume: typeof requestResume;
declare const state_management_requestStopTraining: typeof requestStopTraining;
declare const state_management_setTrainingState: typeof setTrainingState;
declare const state_management_setupTrainingContext: typeof setupTrainingContext;
declare const state_management_startTraining: typeof startTraining;
type state_management_trainingState = trainingState;
declare namespace state_management {
export { state_management_advanceEpoch as advanceEpoch, state_management_endTraining as endTraining, state_management_getIsPaused as getIsPaused, state_management_getIsTraining as getIsTraining, state_management_getStopTraining as getStopTraining, state_management_getTrainingContext as getTrainingContext, state_management_requestPause as requestPause, state_management_requestResume as requestResume, state_management_requestStopTraining as requestStopTraining, state_management_setTrainingState as setTrainingState, state_management_setupTrainingContext as setupTrainingContext, state_management_startTraining as startTraining, type state_management_trainingState as trainingState };
}
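/**
 * @example
 * // Sketch of a manual batch loop combining getMiniBatch with the state
 * // helpers above (how trainModel wires these together is an assumption):
 * setupTrainingContext(net, X_train, Y_train, params); // hypothetical values
 * startTraining();
 * for (const { x, y } of getMiniBatch(X_train, Y_train, 32, true)) {
 *   if (getStopTraining()) break;
 *   const l = params.loss_fn(net.forward(x), y);
 *   net.zeroGrad();
 *   l.backward();
 *   // ...apply an SGD step over net.parameters() using params.l_rate
 * }
 * endTraining();
 */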
declare function getMiniBatch(X: Val, Y: Val, batchSize: number, shuffle?: boolean): Generator<{
x: Val;
y: Val;
}, void, unknown>;
declare function trainModel(model: Sequential, X_train: Val, Y_train: Val, params: NetworkParams, messenger?: Messenger): Promise<void>;
declare function trainSingleBatch(messenger: Messenger): Promise<void>;
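/**
 * @example
 * // End-to-end training sketch (field values are hypothetical; see NetworkParams above):
 * const params: NetworkParams = {
 *   loss_fn: crossEntropyLoss_softmax,
 *   l_rate: 0.01,
 *   epochs: 5,
 *   batch_size: 32,
 *   multiClass: true,
 * };
 * await trainModel(net, X_train, Y_train, params); // a Messenger, if provided, presumably receives TrainingProgress updates
 */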
declare function assert(expr: boolean, msg: () => string): void;
declare function gaussianRandom(mean?: number, stdev?: number): number;
declare function arraysEqual(a: Float64Array, b: Float64Array): boolean;
declare function broadcast(t1: Val | number, t2: Val | number): [Val, Val];
declare function reduceGradient(gradient: Float64Array, originalShape: number[], broadcastedShape: number[]): Float64Array;
declare function calculateMinMax(data: Float64Array): MinMaxInfo;
declare const utils_arraysEqual: typeof arraysEqual;
declare const utils_assert: typeof assert;
declare const utils_broadcast: typeof broadcast;
declare const utils_calculateMinMax: typeof calculateMinMax;
declare const utils_gaussianRandom: typeof gaussianRandom;
declare const utils_reduceGradient: typeof reduceGradient;
declare namespace utils {
export { utils_arraysEqual as arraysEqual, utils_assert as assert, utils_broadcast as broadcast, utils_calculateMinMax as calculateMinMax, utils_gaussianRandom as gaussianRandom, utils_reduceGradient as reduceGradient };
}
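/**
 * @example
 * // Broadcasting sketch (NumPy-style semantics are an assumption, suggested
 * // by reduceGradient's original/broadcasted shape parameters):
 * const [a2, b2] = broadcast(new Val([2, 1], 1), new Val([1, 3], 2));
 * // a2.shape and b2.shape are both expected to be [2, 3]
 */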
export { Val, accuracy, activations as act, getMiniBatch, layers as layer, loss, model, ops as op, state_management as state, trainModel, trainSingleBatch, types, utils as util };