// @jsmlt/jsmlt — JavaScript Machine Learning
// Transpiled distribution file (506 lines, 409 loc, 22.1 kB)
;
Object.defineProperty(exports, "__esModule", {
value: true
});
exports["default"] = void 0;
var _base = require("../base");
var Arrays = _interopRequireWildcard(require("../../arrays"));
var Random = _interopRequireWildcard(require("../../random"));
function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function _getRequireWildcardCache() { return cache; }; return cache; }
function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; if (obj != null) { var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } } newObj["default"] = obj; if (cache) { cache.set(obj, newObj); } return newObj; }
function _typeof(obj) { if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }
function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
function _iterableToArrayLimit(arr, i) { if (!(Symbol.iterator in Object(arr) || Object.prototype.toString.call(arr) === "[object Arguments]")) { return; } var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
function _toConsumableArray(arr) { return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _nonIterableSpread(); }
function _nonIterableSpread() { throw new TypeError("Invalid attempt to spread non-iterable instance"); }
function _iterableToArray(iter) { if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter); }
function _arrayWithoutHoles(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = new Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } }
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; }
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(source, true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(source).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }
function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }
function _possibleConstructorReturn(self, call) { if (call && (_typeof(call) === "object" || typeof call === "function")) { return call; } return _assertThisInitialized(self); }
function _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return self; }
function _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }
function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }
/**
 * Calculate the logistic sigmoid for an input.
 *
 * Note: this is the standard logistic function sigmoid(x) = 1 / (1 + e^(-x)),
 * which maps any real input to (0, 1). It is the INVERSE of the logit function
 * (the previous doc comment mislabeled it as the logit itself).
 *
 * @param {number} x - Input number
 * @return {number} Output of the logistic sigmoid applied on input
 */
function sigmoid(x) {
  return 1 / (1 + Math.exp(-x));
}
/**
 * Fully connected neural network with variable number of hidden layers.
 */
var FullyConnected =
/*#__PURE__*/
function (_Classifier) {
// Transpiled ES2015 class: FullyConnected extends Classifier (imported from ../base).
_inherits(FullyConnected, _Classifier);
/**
 * Constructor. Initialize class members and store user-defined options.
 *
 * @param {Object} [optionsUser] User-defined options
 * @param {number} [optionsUser.numInputs = 'auto'] Number of features each input sample has.
 * The first layer of the network has this (plus one bias node) as the number of nodes. Defaults
 * to 'auto', which determines the number of input nodes on the dimensionality of the training
 * data upon the training call
 * @param {number} [optionsUser.numOutputs = 'auto'] Number of possible outputs for the network.
 * The final layer of the network has this as the number of nodes. Defaults to 'auto', which
 * determines the number of input nodes on the number of unique labels in the data upon the
 * training call
 * @param {Array.<number>} [optionsUser.hiddenLayers = []] Number of nodes in the hidden layers.
 * Each entry in this array corresponds to a single hidden layer
 * @param {number} [optionsUser.numEpochs = 20] Number of epochs (i.e., passes over all training
 * data) to train the network for
 * @param {number} [optionsUser.learningRate = 0.01] Learning rate for training
 */
function FullyConnected() {
var _this;
// Transpiled default parameter: optionsUser = {} when called without arguments.
var optionsUser = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
_classCallCheck(this, FullyConnected);
// Transpiled `super()` call; `_this` stands in for `this` from here on.
_this = _possibleConstructorReturn(this, _getPrototypeOf(FullyConnected).call(this)); // Parse options
var optionsDefault = {
numInputs: 'auto',
numOutputs: 'auto',
hiddenLayers: [],
numEpochs: 20,
learningRate: 0.01
};
// User-supplied options are spread last, so they override the defaults key-by-key.
var options = _objectSpread({}, optionsDefault, {}, optionsUser);
_this.numInputs = options.numInputs;
_this.numOutputs = options.numOutputs;
_this.hiddenLayers = options.hiddenLayers;
_this.numEpochs = options.numEpochs;
_this.learningRate = options.learningRate; // Initialize layers, connectivity, and weights
/**
 * Number of nodes (including bias nodes) in each layer of the network. Filled at the start of
 * training.
 *
 * @type {Array.<number>}
 */
_this.layers = [];
/**
 * Weights between each pair of nodes in subsequent layers. Each entry in the main array
 * contains a matrix of weights between the nodes in that layer and the nodes in the next layer.
 * This includes entries for weights between unconnected (e.g., where the output node is a bias
 * node) nodes
 *
 * @type {Array.<Array.<Array.<number>>>}
 */
_this.weights = [];
/**
 * Boolean matrix of connectivity between each pair of nodes in subsequent layers. For format,
 * see {@link FullyConnected#weights}.
 *
 * @type {Array.<Array.<Array.<boolean>>>}
 */
_this.connectivity = [];
return _this;
}
/**
* Randomly initialize the weights for the neural network. For each subsequent pair of layers,
* where the first has n nodes and the second n' nodes, initialize an matrix with n rows and n'
* columns. Each cell in the matrix is assigned a random value in the range [-1, 1]. Furthermore,
* the connectivity of each pair of nodes in subsequent layers is stored (where all nodes in each
* layer are connected to all non-bias nodes in the next layer).
*
* The weights between layer k and layer k + 1 are stored in element k (starting at k = 0) of the
* weights array.
*/
_createClass(FullyConnected, [{
key: "initializeWeights",
value: function initializeWeights() {
this.weights = [];
this.connectivity = []; // Initialize weights for each subsequent pair of layers
for (var i = 0; i < this.layers.length - 1; i += 1) {
// Shape of the weight and connectivity matrices for the weights between the nodes in this and
// the next layer
var shape = [this.layers[i], this.layers[i + 1]]; // Initialize weights from this layer to the next layer to a random real number in the
// range [-1, 1]
this.weights.push(Arrays.full(shape, function () {
return Random.rand(-1, 1);
})); // Initialize connectivity between nodes by connecting all nodes (including bias nodes; these
// are removed in the next few lines)
var connectivity = Arrays.full(shape, true); // All layers but the last layer have a bias node: remove the connections between all nodes
// and bias nodes in the next layer
if (i < this.layers.length - 2) {
var _iteratorNormalCompletion = true;
var _didIteratorError = false;
var _iteratorError = undefined;
try {
for (var _iterator = connectivity[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) {
var x = _step.value;
x[0] = false;
}
} catch (err) {
_didIteratorError = true;
_iteratorError = err;
} finally {
try {
if (!_iteratorNormalCompletion && _iterator["return"] != null) {
_iterator["return"]();
}
} finally {
if (_didIteratorError) {
throw _iteratorError;
}
}
}
}
this.connectivity.push(connectivity);
}
}
/**
* @see {@link Classifier#train}
*/
}, {
key: "train",
value: function train(X, y) {
// Determine number of inputs (one input for each feature sample) and number of outputs (one
// output for each possible class) automatically depending on user settings
var numInputs = this.numInputs === 'auto' ? X[0].length : this.numInputs;
var numOutputs = this.numOutputs === 'auto' ? Arrays.unique(y).length : this.numOutputs; // Initialize layers
this.layers = [numInputs + 1].concat(_toConsumableArray(this.hiddenLayers), [numOutputs]); // Initialize weights arrays
this.initializeWeights(); // Train for specified number of epochs
for (var i = 0; i < this.numEpochs; i += 1) {
this.trainEpoch(X, y);
}
}
/**
* Train the network for one epoch. Samples will be shuffled inside this function before training.
*
* @param {Array.<Array.<number>>} X - Features of samples to train with
* @param {Array.<mixed>} y - Labels of samples
*/
}, {
key: "trainEpoch",
value: function trainEpoch(X, y) {
// Shuffle data points
var _Arrays$shuffle = Arrays.shuffle(X, y),
_Arrays$shuffle2 = _slicedToArray(_Arrays$shuffle, 2),
XUse = _Arrays$shuffle2[0],
yUse = _Arrays$shuffle2[1]; // Train for each sample individually
for (var i = 0; i < XUse.length; i += 1) {
this.trainSample(XUse[i].slice(), yUse[i]);
}
}
/**
* Calculate root-mean-square error of the network on some data set.
*
* @param {Array.<Array.<number>>} X - Features of samples to calculate RMSE for
* @param {Array.<mixed>} y - Labels of samples
* @return {number} Root-mean-squared error
*/
}, {
key: "calculateRMSE",
value: function calculateRMSE(X, y) {
var _this2 = this;
return Math.sqrt(X.reduce(function (a, x, i) {
return a + Math.pow(_this2.calculateError(x, y[i]), 2);
}, 0) / X.length);
}
/**
* Calculate the squared error between the network outputs for a sample and the specified outputs.
*
* @param {Array.<number>} x - Input sample
* @param {number} y - Sample label
* @return {number} Sum of squared errors between the outputs corresponding to the sample label
* and the outputs obtained passing the sample through the network
*/
}, {
key: "calculateError",
value: function calculateError(x, y) {
var _this$forwardPass = this.forwardPass(x),
_this$forwardPass2 = _slicedToArray(_this$forwardPass, 2),
activations = _this$forwardPass2[0],
outputs = _this$forwardPass2[1];
return outputs[outputs.length - 1].reduce(function (a, o, i) {
return a + 0.5 * Math.pow(o - (y[i] === i), 2);
}, 0);
}
/**
 * Apply the delta rule to the result of a forward pass through the network, expressed by the
 * specified activations and outputs. The network targets corresponding to the forward pass need
 * to be specified too.
 *
 * @param {Array.<Array.<number>>} activations - Network activations for each node in each layer
 * @param {Array.<Array.<number>>} outputs - Network outputs (i.e., the activations passed through
 * the activation function) for each node in each layer
 * @param {Array.<number>} targets - Network targets for the final layer
 * @return {Array.<Array.<number>>} Deltas calculated for each node in each layer. The deltas
 * for the bias nodes are not calculated, and set to 0
 */
}, {
key: "deltaRule",
value: function deltaRule(activations, outputs, targets) {
var _this3 = this;
// Calculate deltas using the generalized delta rule
// One zero-initialized delta per node per layer; bias deltas stay 0.
var deltas = this.layers.map(function (x) {
return Arrays.zeros(x);
}); // Start at the final layer, and calculate deltas going backward until the second layer
// _loop/_loop2 are transpiled bodies of block-scoped loops; they close over k and i.
var _loop = function _loop(k) {
// Index of first regular node in this layer
// (node 0 is the bias node in every layer except the output layer)
var startNode = k < _this3.layers.length - 1 ? 1 : 0; // Loop over all non-bias nodes in the layer
var _loop2 = function _loop2(i) {
// Extract output and activation for this node
var activation = activations[k][i];
var output = outputs[k][i];
if (k === _this3.layers.length - 1) {
// Last layer
// Output-layer delta: f'(activation) * (output - target).
deltas[k][i] = _this3.activationFunctionDerivative(activation) * (output - targets[i]);
} else {
// Earlier layers
// Calculate sum of weighted deltas in next layer
// (bias-node deltas in layer k + 1 are 0, so including them is harmless)
var nextDeltaSum = deltas[k + 1].reduce(function (r, a, j) {
return r + a * _this3.weights[k][i][j];
}, 0);
deltas[k][i] = _this3.activationFunctionDerivative(activation) * nextDeltaSum;
}
};
for (var i = startNode; i < _this3.layers[k]; i += 1) {
_loop2(i);
}
};
// Backward over layers; layer 0 (the inputs) gets no deltas.
for (var k = this.layers.length - 1; k > 0; k -= 1) {
_loop(k);
}
return deltas;
}
/**
* Train the network on a single sample
*
* @param {Array.<number>} x - Input sample
* @param {number} y - Sample label
*/
}, {
key: "trainSample",
value: function trainSample(x, y) {
// Pass the sample through the network
var _this$forwardPass3 = this.forwardPass(x),
_this$forwardPass4 = _slicedToArray(_this$forwardPass3, 2),
activations = _this$forwardPass4[0],
outputs = _this$forwardPass4[1]; // Apply one-hot encoding to the sample label
var targets = _toConsumableArray(Array(this.layers[this.layers.length - 1])).map(function (a, i) {
return i === y ? 1 : 0;
}); // Calculate of delta for each node in each layer
var deltas = this.deltaRule(activations, outputs, targets); // Update weights
for (var k = 0; k < this.layers.length - 1; k += 1) {
// console.log('Updating weights in layer ' + k);
// Loop over all pairs of connected nodes in layers k and k + 1
for (var i = 0; i < this.layers[k]; i += 1) {
// console.log('Updating weights from node ' + i);
for (var j = 0; j < this.layers[k + 1]; j += 1) {
if (!this.connectivity[k][i][j]) {
continue;
} // Update weights
this.weights[k][i][j] -= this.learningRate * outputs[k][i] * deltas[k + 1][j];
}
}
}
}
/**
 * Pass a sample through the network, calculating the activations and outputs for all nodes in the
 * network.
 *
 * @param {Array.<number>} x - Data point features
 * @return {Array} - Array with two elements: containing the activations and outputs,
 * respectively, for each node in the network
 * @throws {Error} When the number of features does not match the number of input nodes
 * (the input layer size minus its bias node)
 */
}, {
key: "forwardPass",
value: function forwardPass(x) {
var _this4 = this;
if (x.length !== this.layers[0] - 1) {
throw new Error('Number of features of samples should match the number of network inputs.');
} // Output and activations of nodes in each layer, including a bias node
var activations = this.layers.map(function (a) {
return Arrays.zeros(a);
});
var outputs = this.layers.map(function (a) {
return Arrays.zeros(a);
}); // Fill the outputs of the first layer with the sample features, and initialize the activations
// of the first layer to an empty list
// (input nodes have no activation; the leading 1 is the bias node's output)
activations[0] = [];
outputs[0] = [1].concat(_toConsumableArray(x.slice())); // Propagate the inputs layer-by-layer
// _loop3/_loop4 are transpiled bodies of block-scoped loops closing over layer and node.
var _loop3 = function _loop3(layer) {
// Index of first regular node in this layer
var startNode = 0; // If this is not the output layer, set the output of the bias node to 1
if (layer < _this4.layers.length - 1) {
startNode = 1; // Bias node
outputs[layer][0] = 1;
} // Calculate the activation and output of each (non-bias) node in the layer
var _loop4 = function _loop4(node) {
// Calculate the activation as the weighted sum of the outputs (including the bias node) of
// the previous layer
activations[layer][node] = outputs[layer - 1].reduce(function (r, a, i) {
return r + a * _this4.weights[layer - 1][i][node];
}, 0); // Calculate the output of this node by applying the activation function to the activation
outputs[layer][node] = _this4.activationFunction(activations[layer][node]);
};
for (var node = startNode; node < _this4.layers[layer]; node += 1) {
_loop4(node);
}
};
// Start at layer 1: layer 0 already holds the sample features.
for (var layer = 1; layer < this.layers.length; layer += 1) {
_loop3(layer);
}
return [activations, outputs];
}
/**
* Get the activation function value for the specified input.
*
* @param {number} a - Input value
* @return {number} Return value of activation function applied to input value
*/
}, {
key: "activationFunction",
value: function activationFunction(a) {
return sigmoid(a);
}
/**
* Get the function value for the derivative of the activation function for the specified input.
*
* @param {number} a - Input value
* @return {number} Return value of derivative of activation function applied to input value
*/
}, {
key: "activationFunctionDerivative",
value: function activationFunctionDerivative(a) {
return sigmoid(a) * (1 - sigmoid(a));
}
/**
 * Manually set the weights matrices of the network.
 *
 * @param {Array.<Array.<Array.<number>>>} weights - Weight matrix for each pair of subsequent
 * layers. For more information, see {@link FullyConnected#weights}
 */
}, {
key: "setWeights",
value: function setWeights(weights) {
this.weights = weights;
}
/**
* @see {@link Classifier#predict}
*/
}, {
key: "predict",
value: function predict(X) {
var _this5 = this;
return X.map(function (x) {
var _this5$forwardPass = _this5.forwardPass(x),
_this5$forwardPass2 = _slicedToArray(_this5$forwardPass, 2),
activations = _this5$forwardPass2[0],
outputs = _this5$forwardPass2[1];
return Arrays.argMax(outputs[outputs.length - 1]);
});
}
}]);
return FullyConnected;
}(_base.Classifier);
// Export the classifier as both the ES-module default export and the CommonJS
// module export, so `require(...)` returns the class directly.
exports["default"] = FullyConnected;
module.exports = exports.default;