@tensorflow/tfjs-node
Version:
This repository provides native TensorFlow execution in backend JavaScript applications under the Node.js runtime, accelerated by the TensorFlow C binary under the hood. It provides the same API as [TensorFlow.js](https://js.tensorflow.org/api/latest/).
504 lines (503 loc) • 27.1 kB
JavaScript
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Drives a TypeScript "step generator" to completion, exposing the result
    // as a promise of type P (defaults to the global Promise).
    return new (P || (P = Promise))(function (resolve, reject) {
        var iterator = generator.apply(thisArg, _arguments || []);
        // Wrap a plain yielded value in P so that .then() is always available.
        function toThenable(value) {
            if (value instanceof P) {
                return value;
            }
            return new P(function (res) { res(value); });
        }
        // Settle the outer promise when the generator is done; otherwise await
        // the yielded value and resume the generator with it.
        function advance(result) {
            if (result.done) {
                resolve(result.value);
            } else {
                toThenable(result.value).then(onValue, onError);
            }
        }
        function onValue(value) {
            try { advance(iterator.next(value)); } catch (e) { reject(e); }
        }
        function onError(err) {
            try { advance(iterator["throw"](err)); } catch (e) { reject(e); }
        }
        // Kick off the generator. A synchronous throw here escapes into the
        // promise executor (rejecting the promise), matching the original
        // helper, whose initial .next() call is likewise outside any try.
        advance(iterator.next());
    });
};
// TypeScript's down-leveled generator runtime (emitted helper). `body` is a
// compiled state machine that is re-entered with an opcode/value pair
// `op = [opcode, value]` and dispatches on the resume point `_.label`.
// Opcodes (per tslib): 0=next, 1=throw, 2=return, 3=break-to-label, 4=yield,
// 5=yield*, 6=catch-dispatch, 7=endfinally. `_.trys` holds active try-region
// tuples [tryLabel, catchLabel, finallyLabel, endLabel]; `_.ops` is the stack
// of pending completions while finally blocks run.
var __generator = (this && this.__generator) || function (thisArg, body) {
// f: re-entrancy guard; y: delegated (yield*) iterator; t: scratch; g: the iterator object.
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
// The returned object is both iterator and iterable, mirroring a native generator.
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
// Native generators throw on re-entrant resumption; emulate that here.
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
// When delegating via yield*, forward next/throw/return to the inner iterator first.
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
// No enclosing try region handles this completion: finish the generator.
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
// break-to-label that stays inside the current try region.
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
// Exception dispatch: jump to the catch label of the innermost try.
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
// Otherwise run the finally block, remembering the pending completion.
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
// Resume the compiled body at _.label with the current op.
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
// opcode 1 (throw) and 6 (unhandled catch-dispatch) propagate as exceptions.
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
var tf = require("@tensorflow/tfjs");
var tfjs_1 = require("@tensorflow/tfjs");
var fs = require("fs");
var path = require("path");
var util_1 = require("util");
var tfn = require("./index");
// tslint:disable-next-line:no-require-imports
var rimraf = require('rimraf');
// tslint:disable-next-line:no-require-imports
var tmp = require('tmp');
var rimrafPromise = (0, util_1.promisify)(rimraf);
// Tests for tfjs-node's TensorBoard event-file writer
// (tfn.node.summaryFileWriter). Reading back the protobuf event files from
// JS is hard, so the tests assert on file existence and on file sizes
// growing in proportion to the number of records written.
describe('tensorboard', function () {
var tmpLogDir;
beforeEach(function () {
// Fresh unique temp directory per test keeps event files isolated.
tmpLogDir = tmp.dirSync().name;
});
// Down-leveled `async () => { if (tmpLogDir != null) await rimrafPromise(tmpLogDir); }`.
afterEach(function () { return __awaiter(void 0, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
// No directory created: skip cleanup (jump to case 2).
if (!(tmpLogDir != null)) return [3 /*break*/, 2];
// Await recursive removal of the temp log directory.
return [4 /*yield*/, rimrafPromise(tmpLogDir)];
case 1:
_a.sent();
_a.label = 2;
case 2: return [2 /*return*/];
}
});
}); });
describe('SummaryFileWriter', function () {
it('empty logdir leads to error', function () {
expect(function () { return tfn.node.summaryFileWriter(''); }).toThrowError(/empty string/);
});
});
describe('SummaryFileWriter.scalar', function () {
it('Create summaryFileWriter and write scalar', function () {
var writer = tfn.node.summaryFileWriter(tmpLogDir);
writer.scalar('foo', 42, 0);
writer.flush();
// Currently, we only verify that the file exists and the size
// increases in a sensible way as we write more scalars to it.
// The difficulty is in reading the protobuf contents of the event
// file in JavaScript/TypeScript.
var fileNames = fs.readdirSync(tmpLogDir);
expect(fileNames.length).toEqual(1);
var eventFilePath = path.join(tmpLogDir, fileNames[0]);
var fileSize0 = fs.statSync(eventFilePath).size;
writer.scalar('foo', 43, 1);
writer.flush();
var fileSize1 = fs.statSync(eventFilePath).size;
// Assumes every scalar event record serializes to the same byte size;
// the equality check below depends on this — TODO confirm.
var incrementPerScalar = fileSize1 - fileSize0;
expect(incrementPerScalar).toBeGreaterThan(0);
writer.scalar('foo', 44, 2);
writer.scalar('foo', 45, 3);
writer.flush();
var fileSize2 = fs.statSync(eventFilePath).size;
// Two more scalars => exactly two more fixed-size records.
expect(fileSize2 - fileSize1).toEqual(2 * incrementPerScalar);
});
it('Writing tf.Scalar works', function () {
// scalar() accepts a tf.Scalar tensor as well as a plain number.
var writer = tfn.node.summaryFileWriter(tmpLogDir);
writer.scalar('foo', (0, tfjs_1.scalar)(42), 0);
writer.flush();
// Currently, we only verify that the file exists and the size
// increases in a sensible way as we write more scalars to it.
// The difficulty is in reading the protobuf contents of the event
// file in JavaScript/TypeScript.
var fileNames = fs.readdirSync(tmpLogDir);
expect(fileNames.length).toEqual(1);
});
it('Writing tf.Scalar no memory leak', function () {
var writer = tfn.node.summaryFileWriter(tmpLogDir);
// Native TF tensor count before the write; it must return to this value
// once the caller disposes the input, i.e. scalar() must not retain it.
var beforeNumTFTensors = writer.backend.getNumOfTFTensors();
var value = (0, tfjs_1.scalar)(42);
writer.scalar('foo', value, 0);
writer.flush();
value.dispose();
expect(writer.backend.getNumOfTFTensors()).toBe(beforeNumTFTensors);
});
it('No crosstalk between two summary writers', function () {
// Two writers on sibling log directories: writes through one must leave
// the other's event file untouched.
var logDir1 = path.join(tmpLogDir, '1');
var writer1 = tfn.node.summaryFileWriter(logDir1);
writer1.scalar('foo', 42, 0);
writer1.flush();
var logDir2 = path.join(tmpLogDir, '2');
var writer2 = tfn.node.summaryFileWriter(logDir2);
writer2.scalar('foo', 1.337, 0);
writer2.flush();
// Currently, we only verify that the file exists and the size
// increases in a sensible way as we write more scalars to it.
// The difficulty is in reading the protobuf contents of the event
// file in JavaScript/TypeScript.
var fileNames = fs.readdirSync(logDir1);
expect(fileNames.length).toEqual(1);
var eventFilePath1 = path.join(logDir1, fileNames[0]);
var fileSize1Num0 = fs.statSync(eventFilePath1).size;
fileNames = fs.readdirSync(logDir2);
expect(fileNames.length).toEqual(1);
var eventFilePath2 = path.join(logDir2, fileNames[0]);
var fileSize2Num0 = fs.statSync(eventFilePath2).size;
expect(fileSize2Num0).toBeGreaterThan(0);
// Write only through writer1; writer2's file size must stay constant.
writer1.scalar('foo', 43, 1);
writer1.flush();
var fileSize1Num1 = fs.statSync(eventFilePath1).size;
// Assumes fixed-size scalar records (see note in the first scalar test).
var incrementPerScalar = fileSize1Num1 - fileSize1Num0;
expect(incrementPerScalar).toBeGreaterThan(0);
writer1.scalar('foo', 44, 2);
writer1.scalar('foo', 45, 3);
writer1.flush();
var fileSize1Num2 = fs.statSync(eventFilePath1).size;
expect(fileSize1Num2 - fileSize1Num1).toEqual(2 * incrementPerScalar);
var fileSize2Num1 = fs.statSync(eventFilePath2).size;
expect(fileSize2Num1).toEqual(fileSize2Num0);
// Now write only through writer2; writer1's file size must stay constant.
writer2.scalar('foo', 1.336, 1);
writer2.scalar('foo', 1.335, 2);
writer2.flush();
var fileSize1Num3 = fs.statSync(eventFilePath1).size;
expect(fileSize1Num3).toEqual(fileSize1Num2);
var fileSize2Num2 = fs.statSync(eventFilePath2).size;
expect(fileSize2Num2 - fileSize2Num1).toEqual(2 * incrementPerScalar);
});
it('Writing into existing directory works', function () {
// Pre-create the parent directory; summaryFileWriter must tolerate it.
fs.mkdirSync(tmpLogDir, { recursive: true });
var writer = tfn.node.summaryFileWriter(path.join(tmpLogDir, '22'));
writer.scalar('foo', 42, 0);
writer.flush();
// The single entry is the '22' subdirectory created by the writer.
var fileNames = fs.readdirSync(tmpLogDir);
expect(fileNames.length).toEqual(1);
});
});
describe('SummaryFileWriter.histogram', function () {
it('Create summaryFileWriter and write tensor', function () {
var writer = tfn.node.summaryFileWriter(tmpLogDir);
writer.histogram('foo', (0, tfjs_1.tensor1d)([1, 2, 3, 4, 5], 'int32'), 0, 5);
writer.flush();
// Currently, we only verify that the file exists and the size
// increases in a sensible way as we write more histograms to it.
// The difficulty is in reading the protobuf contents of the event
// file in JavaScript/TypeScript.
var fileNames = fs.readdirSync(tmpLogDir);
expect(fileNames.length).toEqual(1);
var eventFilePath = path.join(tmpLogDir, fileNames[0]);
var fileSize0 = fs.statSync(eventFilePath).size;
writer.histogram('foo', (0, tfjs_1.tensor1d)([1, 1, 1, 1, 1], 'int32'), 1, 5);
writer.flush();
var fileSize1 = fs.statSync(eventFilePath).size;
// Assumes histogram records of equal bucket count serialize to the same
// size — TODO confirm.
var incrementPerScalar = fileSize1 - fileSize0;
expect(incrementPerScalar).toBeGreaterThan(0);
writer.histogram('foo', (0, tfjs_1.tensor1d)([2, 2, 2, 2, 2], 'int32'), 2, 5);
writer.histogram('foo', (0, tfjs_1.tensor1d)([3, 3, 3, 3, 3], 'int32'), 3, 5);
writer.flush();
var fileSize2 = fs.statSync(eventFilePath).size;
expect(fileSize2 - fileSize1).toEqual(2 * incrementPerScalar);
});
it('summaryFileWriter no memory leak', function () {
var writer = tfn.node.summaryFileWriter(tmpLogDir);
// As in the scalar leak test: histogram() must not retain native tensors
// beyond the caller-owned input.
var beforeNumTFTensors = writer.backend.getNumOfTFTensors();
var value = (0, tfjs_1.tensor1d)([1, 2, 3, 4, 5], 'int32');
writer.histogram('foo', value, 0, 5);
writer.flush();
value.dispose();
expect(writer.backend.getNumOfTFTensors()).toBe(beforeNumTFTensors);
});
it('Can create multiple normal distribution', function () {
var writer = tfn.node.summaryFileWriter(tmpLogDir);
// tf.tidy disposes the intermediate tensors created in the loop. This
// test only checks that repeated histogram writes (with a Markdown
// description) complete without throwing.
tf.tidy(function () {
for (var i = 0; i < 10; ++i) {
var normal = tf.randomNormal([1000]).add(i / 10);
writer.histogram('random normal', normal, i, 5, 'This is a *test* of **Markdown**');
}
writer.flush();
});
});
});
});
// Tests for the tf.node.tensorBoard() Keras-style callback, which logs loss
// and metric scalars (and optionally weight histograms) to TensorBoard event
// files during Model.fit(). As in the suite above, event-file contents are
// not parsed; the tests assert on directory layout and file-size growth.
describe('tensorBoard callback', function () {
var tmpLogDir;
beforeEach(function () {
// Fresh unique temp directory per test.
tmpLogDir = tmp.dirSync().name;
});
// Down-leveled `async () => { if (tmpLogDir != null) await rimrafPromise(tmpLogDir); }`.
afterEach(function () { return __awaiter(void 0, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(tmpLogDir != null)) return [3 /*break*/, 2];
return [4 /*yield*/, rimrafPromise(tmpLogDir)];
case 1:
_a.sent();
_a.label = 2;
case 2: return [2 /*return*/];
}
});
}); });
// Small two-layer regression model (10 inputs -> 1 output) shared by the
// fit() tests below; compiled with MAE so both a loss and a metric get logged.
function createModelForTest() {
var model = tfn.sequential();
model.add(tfn.layers.dense({ units: 5, activation: 'relu', inputShape: [10] }));
model.add(tfn.layers.dense({ units: 1 }));
model.compile({ loss: 'meanSquaredError', optimizer: 'sgd', metrics: ['MAE'] });
return model;
}
it('fit(): default epoch updateFreq, with validation', function () { return __awaiter(void 0, void 0, void 0, function () {
var model, xs, ys, valXs, valYs, subDirs, trainLogDir, trainFiles, trainFileSize0, valLogDir, valFiles, valFileSize0, history, trainFileSize1, valFileSize1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
model = createModelForTest();
xs = tfn.randomUniform([100, 10]);
ys = tfn.randomUniform([100, 1]);
valXs = tfn.randomUniform([10, 10]);
valYs = tfn.randomUniform([10, 1]);
// Warm-up training.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 1,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir)
})];
case 1:
// Warm-up training.
_a.sent();
// The callback creates 'train' and 'val' subdirectories under logdir.
subDirs = fs.readdirSync(tmpLogDir);
expect(subDirs).toContain('train');
expect(subDirs).toContain('val');
trainLogDir = path.join(tmpLogDir, 'train');
trainFiles = fs.readdirSync(trainLogDir);
trainFileSize0 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
expect(trainFileSize0).toBeGreaterThan(0);
valLogDir = path.join(tmpLogDir, 'val');
valFiles = fs.readdirSync(valLogDir);
valFileSize0 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
expect(valFileSize0).toBeGreaterThan(0);
// With updateFreq === epoch, the train and val subset should have generated
// the same amount of logs.
expect(valFileSize0).toEqual(trainFileSize0);
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 3,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir)
})];
case 2:
history = _a.sent();
// Three epochs => three entries per history series.
expect(history.history.loss.length).toEqual(3);
expect(history.history.val_loss.length).toEqual(3);
expect(history.history.MAE.length).toEqual(3);
expect(history.history.val_MAE.length).toEqual(3);
trainFileSize1 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
valFileSize1 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
// We currently only assert that new content has been written to the log
// file.
expect(trainFileSize1).toBeGreaterThan(trainFileSize0);
expect(valFileSize1).toBeGreaterThan(valFileSize0);
// With updateFreq === epoch, the train and val subset should have generated
// the same amount of logs.
expect(valFileSize1).toEqual(trainFileSize1);
return [2 /*return*/];
}
});
}); });
it('fit(): batch updateFreq, with validation', function () { return __awaiter(void 0, void 0, void 0, function () {
var model, xs, ys, valXs, valYs, subDirs, trainLogDir, trainFiles, trainFileSize0, valLogDir, valFiles, valFileSize0, history, trainFileSize1, valFileSize1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
model = createModelForTest();
xs = tfn.randomUniform([100, 10]);
ys = tfn.randomUniform([100, 1]);
valXs = tfn.randomUniform([10, 10]);
valYs = tfn.randomUniform([10, 1]);
// Warm-up training.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 1,
verbose: 0,
validationData: [valXs, valYs],
// Use batch updateFreq here.
callbacks: tfn.node.tensorBoard(tmpLogDir, { updateFreq: 'batch' })
})];
case 1:
// Warm-up training.
_a.sent();
subDirs = fs.readdirSync(tmpLogDir);
expect(subDirs).toContain('train');
expect(subDirs).toContain('val');
trainLogDir = path.join(tmpLogDir, 'train');
trainFiles = fs.readdirSync(trainLogDir);
trainFileSize0 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
expect(trainFileSize0).toBeGreaterThan(0);
valLogDir = path.join(tmpLogDir, 'val');
valFiles = fs.readdirSync(valLogDir);
valFileSize0 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
expect(valFileSize0).toBeGreaterThan(0);
// The train subset should have generated more logs than the val subset,
// because the train subset gets logged every batch, while the val subset
// gets logged every epoch.
expect(trainFileSize0).toBeGreaterThan(valFileSize0);
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 3,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir)
})];
case 2:
history = _a.sent();
expect(history.history.loss.length).toEqual(3);
expect(history.history.val_loss.length).toEqual(3);
expect(history.history.MAE.length).toEqual(3);
expect(history.history.val_MAE.length).toEqual(3);
trainFileSize1 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
valFileSize1 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
// We currently only assert that new content has been written to the log
// file.
expect(trainFileSize1).toBeGreaterThan(trainFileSize0);
expect(valFileSize1).toBeGreaterThan(valFileSize0);
// The train subset should have generated more logs than the val subset,
// because the train subset gets logged every batch, while the val subset
// gets logged every epoch.
expect(trainFileSize1).toBeGreaterThan(valFileSize1);
return [2 /*return*/];
}
});
}); });
// NOTE(review): this test was previously titled 'fit(): batch updateFreq,
// with initialEpoch', but the callback below is constructed with
// { updateFreq: 'epoch' } and the expectations (two epoch-end scalars per
// epoch over two epochs = 4 calls) match epoch-frequency logging, so the
// title has been corrected.
it('fit(): epoch updateFreq, with initialEpoch', function () { return __awaiter(void 0, void 0, void 0, function () {
var model, xs, ys, valXs, valYs, callback, trainWriterScalarSpy, valWriterScalarSpy, trainCallArgs, valCallArgs;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
model = createModelForTest();
xs = tfn.randomUniform([100, 10]);
ys = tfn.randomUniform([100, 1]);
valXs = tfn.randomUniform([10, 10]);
valYs = tfn.randomUniform([10, 1]);
// Warm-up training. Also ensures that `callback.trainWriter` and
// `callback.valWriter` are created.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 2,
validationData: [valXs, valYs],
verbose: 0,
})];
case 1:
// Warm-up training. Also ensures that `callback.trainWriter` and
// `callback.valWriter` are created.
_a.sent();
callback = tfn.node.tensorBoard(tmpLogDir, { updateFreq: 'epoch' });
// Force-create both writers now so the scalar() methods exist to spy on.
// tslint:disable-next-line:no-any
callback.ensureTrainWriterCreated();
// tslint:disable-next-line:no-any
callback.ensureValWriterCreated();
trainWriterScalarSpy = spyOn(callback.trainWriter, 'scalar');
valWriterScalarSpy = spyOn(callback.valWriter, 'scalar');
// Train for 2 more epochs, using initialEpoch and callback.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 4,
initialEpoch: 2,
validationData: [valXs, valYs],
callbacks: [callback],
})];
case 2:
// Train for 2 more epochs, using initialEpoch and callback.
_a.sent();
// 2 epochs x (epoch_loss + epoch_MAE) = 4 scalar writes per writer.
expect(trainWriterScalarSpy).toHaveBeenCalledTimes(4);
trainCallArgs = trainWriterScalarSpy.calls.allArgs();
// Assert that the epoch numbers used to log the epoch-end loss and metric
// reflect initialEpoch.
expect(trainCallArgs[0][0]).toEqual('epoch_loss');
expect(trainCallArgs[0][2]).toEqual(3);
expect(trainCallArgs[1][0]).toEqual('epoch_MAE');
expect(trainCallArgs[1][2]).toEqual(3);
expect(trainCallArgs[2][0]).toEqual('epoch_loss');
expect(trainCallArgs[2][2]).toEqual(4);
expect(trainCallArgs[3][0]).toEqual('epoch_MAE');
expect(trainCallArgs[3][2]).toEqual(4);
expect(valWriterScalarSpy).toHaveBeenCalledTimes(4);
valCallArgs = valWriterScalarSpy.calls.allArgs();
expect(valCallArgs[0][0]).toEqual('epoch_loss');
expect(valCallArgs[0][2]).toEqual(3);
expect(valCallArgs[1][0]).toEqual('epoch_MAE');
expect(valCallArgs[1][2]).toEqual(3);
expect(valCallArgs[2][0]).toEqual('epoch_loss');
expect(valCallArgs[2][2]).toEqual(4);
expect(valCallArgs[3][0]).toEqual('epoch_MAE');
expect(valCallArgs[3][2]).toEqual(4);
return [2 /*return*/];
}
});
}); });
it('fit(): histogramFreq 2, with validation', function () { return __awaiter(void 0, void 0, void 0, function () {
var model, xs, ys, valXs, valYs, subDirs, trainLogDir, trainFiles, trainFileSize0, trainFileSize1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
model = createModelForTest();
xs = tfn.randomUniform([100, 10]);
ys = tfn.randomUniform([100, 1]);
valXs = tfn.randomUniform([10, 10]);
valYs = tfn.randomUniform([10, 1]);
// Warm-up training.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 1,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir, { histogramFreq: 2 })
})];
case 1:
// Warm-up training.
_a.sent();
subDirs = fs.readdirSync(tmpLogDir);
expect(subDirs).toContain('train');
trainLogDir = path.join(tmpLogDir, 'train');
trainFiles = fs.readdirSync(trainLogDir);
trainFileSize0 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
expect(trainFileSize0).toBeGreaterThan(0);
// Actual training run.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 4,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir, { histogramFreq: 2 })
})];
case 2:
// Actual training run.
_a.sent();
trainFileSize1 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
// We currently only assert that new content has been written to the log
// file.
expect(trainFileSize1).toBeGreaterThan(trainFileSize0);
return [2 /*return*/];
}
});
}); });
it('Invalid updateFreq value causes error', function () { return __awaiter(void 0, void 0, void 0, function () {
return __generator(this, function (_a) {
// 'foo' is not a legal updateFreq; construction must throw synchronously.
expect(function () { return tfn.node.tensorBoard(tmpLogDir, {
// tslint:disable-next-line:no-any
updateFreq: 'foo'
}); }).toThrowError(/Expected updateFreq/);
return [2 /*return*/];
});
}); });
it('Invalid histogramFreq value causes error ', function () { return __awaiter(void 0, void 0, void 0, function () {
return __generator(this, function (_a) {
// histogramFreq must be a non-negative integer: reject negatives...
expect(function () { return tfn.node.tensorBoard(tmpLogDir, {
histogramFreq: -1
}); }).toThrowError(/Expected histogramFreq/);
// ...and non-integers.
expect(function () { return tfn.node.tensorBoard(tmpLogDir, {
histogramFreq: 1.3
}); }).toThrowError(/Expected histogramFreq/);
return [2 /*return*/];
});
}); });
});