// @tensorflow/tfjs-core: batchnorm_test.js (compiled from src/ops/batchnorm_test.ts)
/**
 * @license
 * Copyright 2017 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
import * as tf from '../index';
import {ALL_ENVS, describeWithFlags} from '../jasmine_util';
import {expectArraysClose} from '../test_util';

describeWithFlags('batchNorm4D', ALL_ENVS, () => {
  it('simple batchnorm4D, no offset or scale, 2x1x1x2', async () => {
    const xT = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm4d(
        xT, meanT, varianceT, undefined, undefined, varianceEpsilon);

    const x = await xT.array();
    const mean = await meanT.array();
    const variance = await varianceT.array();
    expectArraysClose(await result.data(), [
      (x[0][0][0][0] - mean[0]) * 1 / Math.sqrt(variance[0] + varianceEpsilon),
      (x[0][0][0][1] - mean[1]) * 1 / Math.sqrt(variance[1] + varianceEpsilon),
      (x[1][0][0][0] - mean[0]) * 1 / Math.sqrt(variance[0] + varianceEpsilon),
      (x[1][0][0][1] - mean[1]) * 1 / Math.sqrt(variance[1] + varianceEpsilon)
    ]);
  });

  it('simple batchnorm4D, no offset, 2x1x1x2', async () => {
    const xT = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const scaleT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm4d(
        xT, meanT, varianceT, undefined, scaleT, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const scale = await scaleT.buffer();
    expectArraysClose(await result.data(), [
      (x.get(0, 0, 0, 0) - mean.get(0)) * scale.get(0) /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(0, 0, 0, 1) - mean.get(1)) * scale.get(1) /
          Math.sqrt(variance.get(1) + varianceEpsilon),
      (x.get(1, 0, 0, 0) - mean.get(0)) * scale.get(0) /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(1, 0, 0, 1) - mean.get(1)) * scale.get(1) /
          Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });

  it('simple batchnorm4D, no scale, 2x1x1x2', async () => {
    const xT = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const offsetT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm4d(
        xT, meanT, varianceT, offsetT, undefined, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const offset = await offsetT.buffer();
    expectArraysClose(await result.data(), [
      offset.get(0) +
          (x.get(0, 0, 0, 0) - mean.get(0)) * 1 /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(0, 0, 0, 1) - mean.get(1)) * 1 /
              Math.sqrt(variance.get(1) + varianceEpsilon),
      offset.get(0) +
          (x.get(1, 0, 0, 0) - mean.get(0)) * 1 /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(1, 0, 0, 1) - mean.get(1)) * 1 /
              Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });
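  // All of the hand-computed expectations in this suite follow the standard
  // batch-normalization formula, applied per channel:
  //   y = offset + (x - mean) * scale / sqrt(variance + varianceEpsilon)
  // with scale defaulting to 1 and offset defaulting to 0 when omitted.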
  it('simple batchnorm4D, 2x1x1x2', async () => {
    const xT = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const offsetT = tf.tensor1d([3, 4]);
    const scaleT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm4d(
        xT, meanT, varianceT, offsetT, scaleT, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const scale = await scaleT.buffer();
    const offset = await offsetT.buffer();
    expectArraysClose(await result.data(), [
      offset.get(0) +
          (x.get(0, 0, 0, 0) - mean.get(0)) * scale.get(0) /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(0, 0, 0, 1) - mean.get(1)) * scale.get(1) /
              Math.sqrt(variance.get(1) + varianceEpsilon),
      offset.get(0) +
          (x.get(1, 0, 0, 0) - mean.get(0)) * scale.get(0) /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(1, 0, 0, 1) - mean.get(1)) * scale.get(1) /
              Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });

  it('accepts a tensor-like object', async () => {
    const x = [[[[2, 4]]], [[[9, 23]]]];  // 2x1x1x2
    const mean = [1, 2];
    const variance = [2, 3];
    const offset = [3, 4];
    const scale = [4, 5];
    const varianceEpsilon = .001;

    const result =
        tf.batchNorm4d(x, mean, variance, offset, scale, varianceEpsilon);

    expectArraysClose(await result.data(), [
      offset[0] +
          (x[0][0][0][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[0][0][0][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon),
      offset[0] +
          (x[1][0][0][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[1][0][0][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon)
    ]);
  });

  it('simple batchnorm4D gradients, 2x1x1x2', async () => {
    const x = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const mean = tf.tensor1d([1, 2]);
    const variance = tf.tensor1d([2, 3]);
    const offset = tf.tensor1d([3, 4]);
    const scale = tf.tensor1d([2, 5]);
    const varianceEpsilon = .001;

    const dy = tf.tensor4d([-1, -1, -1, -1], [2, 1, 1, 2]);
    const gradX = tf.grad(
        (x) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(x, dy);
    expectArraysClose(await gradX.data(), [-1.414, -2.887, -1.414, -2.887]);
    expect(gradX.shape).toEqual([2, 1, 1, 2]);
    const gradMean = tf.grad(
        (mean) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(mean, dy);
    expectArraysClose(await gradMean.data(), [2.828, 5.773]);
    expect(gradMean.shape).toEqual([2]);
    const gradVariance = tf.grad(
        (variance) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(variance, dy);
    expectArraysClose(await gradVariance.data(), [3.180, 11.060]);
    expect(gradVariance.shape).toEqual([2]);
    const gradOffset = tf.grad(
        (offset) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(offset, dy);
    expectArraysClose(await gradOffset.data(), await dy.sum([0, 1, 2]).data());
    expect(gradOffset.shape).toEqual([2]);
    const gradScale = tf.grad(
        (scale) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(scale, dy);
    expectArraysClose(await gradScale.data(), [-6.362, -13.277]);
    expect(gradScale.shape).toEqual([2]);
  });
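  // Because offset enters the formula additively, its gradient is just dy
  // reduced over every axis except the channel axis, which is why the test
  // above compares gradOffset against dy.sum([0, 1, 2]).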
  it('batchnorm4D gradients, same shapes in x, mean and variance', async () => {
    const x = tf.tensor4d([10, 20, 30, 40], [2, 1, 1, 2]);
    const mean = tf.tensor4d([0, 5, 10, 15], [2, 1, 1, 2]);
    const variance = tf.tensor4d([2, 4, 6, 8], [2, 1, 1, 2]);
    const scale = tf.tensor4d([2, 5, 2, 5], [2, 1, 1, 2]);
    const offset = tf.tensor4d([0, 0, 0, 0], [2, 1, 1, 2]);
    const varianceEpsilon = .001;

    const dy = tf.tensor4d([-1, -1, -1, -1], [2, 1, 1, 2]);
    const gradX = tf.grad(
        (x) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(x, dy);
    expectArraysClose(await gradX.data(), [-1.414, -2.500, -0.816, -1.768]);
    expect(gradX.shape).toEqual([2, 1, 1, 2]);
    const gradMean = tf.grad(
        (mean) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(mean, dy);
    expectArraysClose(await gradMean.data(), [1.414, 2.500, 0.816, 1.768]);
    expect(gradMean.shape).toEqual([2, 1, 1, 2]);
    const gradVariance = tf.grad(
        (variance) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(variance, dy);
    expectArraysClose(await gradVariance.data(), [3.533, 4.686, 1.360, 2.762]);
    expect(gradVariance.shape).toEqual([2, 1, 1, 2]);
    const gradOffset = tf.grad(
        (offset) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(offset, dy);
    expectArraysClose(await gradOffset.data(), await dy.data());
    expect(gradOffset.shape).toEqual([2, 1, 1, 2]);
    const gradScale = tf.grad(
        (scale) => tf.batchNorm4d(
            x, mean, variance, offset, scale, varianceEpsilon))(scale, dy);
    expectArraysClose(await gradScale.data(), [-7.069, -7.499, -8.164, -8.838]);
    expect(gradScale.shape).toEqual([2, 1, 1, 2]);
  });
});

describeWithFlags('batchNorm3D', ALL_ENVS, () => {
  it('simple batchnorm3D, no offset or scale, 2x1x2', async () => {
    const xT = tf.tensor3d([2, 4, 9, 23], [2, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm3d(
        xT, meanT, varianceT, undefined, undefined, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    expectArraysClose(await result.data(), [
      (x.get(0, 0, 0) - mean.get(0)) * 1 /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(0, 0, 1) - mean.get(1)) * 1 /
          Math.sqrt(variance.get(1) + varianceEpsilon),
      (x.get(1, 0, 0) - mean.get(0)) * 1 /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(1, 0, 1) - mean.get(1)) * 1 /
          Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });

  it('simple batchnorm3D, no offset, 2x1x2', async () => {
    const xT = tf.tensor3d([2, 4, 9, 23], [2, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const scaleT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm3d(
        xT, meanT, varianceT, undefined, scaleT, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const scale = await scaleT.buffer();
    expectArraysClose(await result.data(), [
      (x.get(0, 0, 0) - mean.get(0)) * scale.get(0) /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(0, 0, 1) - mean.get(1)) * scale.get(1) /
          Math.sqrt(variance.get(1) + varianceEpsilon),
      (x.get(1, 0, 0) - mean.get(0)) * scale.get(0) /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(1, 0, 1) - mean.get(1)) * scale.get(1) /
          Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });
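  // For the 3D case the 1-D mean/variance/offset/scale arguments broadcast
  // over the trailing (channel) dimension, exactly as in the 4D tests above.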
  it('simple batchnorm3D, no scale, 2x1x2', async () => {
    const xT = tf.tensor3d([2, 4, 9, 23], [2, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const offsetT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm3d(
        xT, meanT, varianceT, offsetT, undefined, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const offset = await offsetT.buffer();
    expectArraysClose(await result.data(), [
      offset.get(0) +
          (x.get(0, 0, 0) - mean.get(0)) * 1 /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(0, 0, 1) - mean.get(1)) * 1 /
              Math.sqrt(variance.get(1) + varianceEpsilon),
      offset.get(0) +
          (x.get(1, 0, 0) - mean.get(0)) * 1 /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(1, 0, 1) - mean.get(1)) * 1 /
              Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });

  it('simple batchnorm3D, 2x1x2', async () => {
    const xT = tf.tensor3d([2, 4, 9, 23], [2, 1, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const offsetT = tf.tensor1d([3, 4]);
    const scaleT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm3d(
        xT, meanT, varianceT, offsetT, scaleT, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const offset = await offsetT.buffer();
    const scale = await scaleT.buffer();
    expectArraysClose(await result.data(), [
      offset.get(0) +
          (x.get(0, 0, 0) - mean.get(0)) * scale.get(0) /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(0, 0, 1) - mean.get(1)) * scale.get(1) /
              Math.sqrt(variance.get(1) + varianceEpsilon),
      offset.get(0) +
          (x.get(1, 0, 0) - mean.get(0)) * scale.get(0) /
              Math.sqrt(variance.get(0) + varianceEpsilon),
      offset.get(1) +
          (x.get(1, 0, 1) - mean.get(1)) * scale.get(1) /
              Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });

  it('accepts a tensor-like object', async () => {
    const x = [[[2, 4]], [[9, 23]]];  // 2x1x2
    const mean = [1, 2];
    const variance = [2, 3];
    const offset = [3, 4];
    const scale = [4, 5];
    const varianceEpsilon = .001;

    const result =
        tf.batchNorm3d(x, mean, variance, offset, scale, varianceEpsilon);

    expectArraysClose(await result.data(), [
      offset[0] +
          (x[0][0][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[0][0][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon),
      offset[0] +
          (x[1][0][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[1][0][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon)
    ]);
  });

  it('batchnorm3D, x,mean,var,offset,scale are all 3D', async () => {
    const shape = [2, 1, 2];
    const xT = tf.tensor3d([2, 4, 9, 23], shape);
    const meanT = tf.tensor3d([1, 2, 3, 4], shape);
    const varianceT = tf.tensor3d([2, 3, 4, 5], shape);
    const offsetT = tf.tensor3d([3, 4, 5, 6], shape);
    const scaleT = tf.tensor3d([4, 5, 6, 7], shape);
    const varianceEpsilon = .001;

    const result = tf.batchNorm3d(
        xT, meanT, varianceT, offsetT, scaleT, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const offset = await offsetT.buffer();
    const scale = await scaleT.buffer();
    expectArraysClose(await result.data(), [
      offset.get(0, 0, 0) +
          (x.get(0, 0, 0) - mean.get(0, 0, 0)) * scale.get(0, 0, 0) /
              Math.sqrt(variance.get(0, 0, 0) + varianceEpsilon),
      offset.get(0, 0, 1) +
          (x.get(0, 0, 1) - mean.get(0, 0, 1)) * scale.get(0, 0, 1) /
              Math.sqrt(variance.get(0, 0, 1) + varianceEpsilon),
      offset.get(1, 0, 0) +
          (x.get(1, 0, 0) - mean.get(1, 0, 0)) * scale.get(1, 0, 0) /
              Math.sqrt(variance.get(1, 0, 0) + varianceEpsilon),
      offset.get(1, 0, 1) +
          (x.get(1, 0, 1) - mean.get(1, 0, 1)) * scale.get(1, 0, 1) /
              Math.sqrt(variance.get(1, 0, 1) + varianceEpsilon)
    ]);
  });
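  // When mean, variance, offset and scale all share x's full shape, as in the
  // test above, the normalization is purely elementwise and no per-channel
  // broadcasting is involved.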
  it('simple batchnorm3D gradients, 2x1x2', async () => {
    const x = tf.tensor3d([2, 4, 9, 23], [2, 1, 2]);
    const mean = tf.tensor1d([1, 2]);
    const variance = tf.tensor1d([2, 3]);
    const offset = tf.tensor1d([3, 4]);
    const scale = tf.tensor1d([2, 5]);
    const varianceEpsilon = .001;

    const dy = tf.tensor3d([1, 1, 1, 1], [2, 1, 2]);
    const gradX = tf.grad(
        (x) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(x, dy);
    expectArraysClose(await gradX.data(), [1.414, 2.887, 1.414, 2.887]);
    expect(gradX.shape).toEqual([2, 1, 2]);
    const gradMean = tf.grad(
        (mean) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(mean, dy);
    expectArraysClose(await gradMean.data(), [-2.828, -5.773]);
    expect(gradMean.shape).toEqual([2]);
    const gradVariance = tf.grad(
        (variance) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(variance, dy);
    expectArraysClose(await gradVariance.data(), [-3.180, -11.060]);
    expect(gradVariance.shape).toEqual([2]);
    const gradOffset = tf.grad(
        (offset) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(offset, dy);
    expectArraysClose(await gradOffset.data(), [2, 2]);
    expect(gradOffset.shape).toEqual([2]);
    const gradScale = tf.grad(
        (scale) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(scale, dy);
    expectArraysClose(await gradScale.data(), [6.362, 13.277]);
    expect(gradScale.shape).toEqual([2]);
  });

  it('batchnorm3D gradients, same shapes in x, mean and variance', async () => {
    const x = tf.tensor3d([10, 20, 30, 40], [2, 1, 2]);
    const mean = tf.tensor3d([0, 5, 10, 15], [2, 1, 2]);
    const variance = tf.tensor3d([2, 4, 6, 8], [2, 1, 2]);
    const scale = tf.tensor3d([2, 5, 2, 5], [2, 1, 2]);
    const offset = tf.tensor3d([0, 0, 0, 0], [2, 1, 2]);
    const varianceEpsilon = .001;

    const dy = tf.tensor3d([1, 1, 1, 1], [2, 1, 2]);
    const gradX = tf.grad(
        (x) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(x, dy);
    expectArraysClose(await gradX.data(), [1.414, 2.500, 0.816, 1.768]);
    expect(gradX.shape).toEqual([2, 1, 2]);
    const gradMean = tf.grad(
        (mean) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(mean, dy);
    expectArraysClose(await gradMean.data(), [-1.414, -2.500, -0.816, -1.768]);
    expect(gradMean.shape).toEqual([2, 1, 2]);
    const gradVariance = tf.grad(
        (variance) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(variance, dy);
    expectArraysClose(
        await gradVariance.data(), [-3.533, -4.686, -1.360, -2.762]);
    expect(gradVariance.shape).toEqual([2, 1, 2]);
    const gradOffset = tf.grad(
        (offset) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(offset, dy);
    expectArraysClose(await gradOffset.data(), [1, 1, 1, 1]);
    expect(gradOffset.shape).toEqual([2, 1, 2]);
    const gradScale = tf.grad(
        (scale) => tf.batchNorm3d(
            x, mean, variance, offset, scale, varianceEpsilon))(scale, dy);
    expectArraysClose(await gradScale.data(), [7.069, 7.499, 8.164, 8.838]);
    expect(gradScale.shape).toEqual([2, 1, 2]);
  });
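  // The 'matches tensorflow' tests below compare against fixed golden values
  // (per the test names, computed with TensorFlow) rather than hand-derived
  // expressions.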
  it('batchnorm matches tensorflow, 2x3x3', async () => {
    const x = tf.tensor3d(
        [
          0.49955603, 0.04158615, -1.09440524, 2.03854165, -0.61578344,
          2.87533573, 1.18105987, 0.807462, 1.87888837, 2.26563962,
          -0.37040935, 1.35848753, -0.75347094, 0.15683117, 0.91925946,
          0.34121279, 0.92717143, 1.89683965
        ],
        [2, 3, 3]);
    const mean = tf.tensor1d([0.39745062, -0.48062894, 0.4847822]);
    const variance = tf.tensor1d([0.32375343, 0.67117643, 1.08334653]);
    const offset = tf.tensor1d([0.69398749, -1.29056387, 0.9429723]);
    const scale = tf.tensor1d([-0.5607271, 0.9878457, 0.25181573]);
    const varianceEpsilon = .001;

    const result =
        tf.batchNorm3d(x, mean, variance, offset, scale, varianceEpsilon);

    expectArraysClose(await result.data(), [
      0.59352049, -0.66135202, 0.5610874, -0.92077015, -1.45341019, 1.52106473,
      -0.07704776, 0.26144429, 1.28010017, -1.14422404, -1.15776136, 1.15425493,
      1.82644104, -0.52249442, 1.04803919, 0.74932291, 0.40568101, 1.2844412
    ]);
  });
});

describeWithFlags('batchNorm2D', ALL_ENVS, () => {
  it('simple batchnorm2D, no offset or scale, 2x2', async () => {
    const xT = tf.tensor2d([2, 4, 9, 23], [2, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm2d(
        xT, meanT, varianceT, undefined, undefined, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    expectArraysClose(await result.data(), [
      (x.get(0, 0) - mean.get(0)) * 1 /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(0, 1) - mean.get(1)) * 1 /
          Math.sqrt(variance.get(1) + varianceEpsilon),
      (x.get(1, 0) - mean.get(0)) * 1 /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(1, 1) - mean.get(1)) * 1 /
          Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });

  it('simple batchnorm2D, no offset, 2x2', async () => {
    const xT = tf.tensor2d([2, 4, 9, 23], [2, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const scaleT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm2d(
        xT, meanT, varianceT, undefined, scaleT, varianceEpsilon);

    const x = await xT.buffer();
    const mean = await meanT.buffer();
    const variance = await varianceT.buffer();
    const scale = await scaleT.buffer();
    expectArraysClose(await result.data(), [
      (x.get(0, 0) - mean.get(0)) * scale.get(0) /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(0, 1) - mean.get(1)) * scale.get(1) /
          Math.sqrt(variance.get(1) + varianceEpsilon),
      (x.get(1, 0) - mean.get(0)) * scale.get(0) /
          Math.sqrt(variance.get(0) + varianceEpsilon),
      (x.get(1, 1) - mean.get(1)) * scale.get(1) /
          Math.sqrt(variance.get(1) + varianceEpsilon)
    ]);
  });

  it('simple batchnorm2D, no scale, 2x2', async () => {
    const xT = tf.tensor2d([2, 4, 9, 23], [2, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const offsetT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm2d(
        xT, meanT, varianceT, offsetT, undefined, varianceEpsilon);

    const offset = await offsetT.array();
    const mean = await meanT.array();
    const variance = await varianceT.array();
    const x = await xT.array();
    expectArraysClose(await result.data(), [
      offset[0] +
          (x[0][0] - mean[0]) * 1 / Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[0][1] - mean[1]) * 1 / Math.sqrt(variance[1] + varianceEpsilon),
      offset[0] +
          (x[1][0] - mean[0]) * 1 / Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[1][1] - mean[1]) * 1 / Math.sqrt(variance[1] + varianceEpsilon)
    ]);
  });
  it('simple batchnorm2D, 2x2', async () => {
    const xT = tf.tensor2d([2, 4, 9, 23], [2, 2]);
    const meanT = tf.tensor1d([1, 2]);
    const varianceT = tf.tensor1d([2, 3]);
    const offsetT = tf.tensor1d([3, 4]);
    const scaleT = tf.tensor1d([4, 5]);
    const varianceEpsilon = .001;

    const result = tf.batchNorm2d(
        xT, meanT, varianceT, offsetT, scaleT, varianceEpsilon);

    const offset = await offsetT.array();
    const mean = await meanT.array();
    const variance = await varianceT.array();
    const scale = await scaleT.array();
    const x = await xT.array();
    expectArraysClose(await result.data(), [
      offset[0] +
          (x[0][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[0][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon),
      offset[0] +
          (x[1][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[1][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon)
    ]);
  });

  it('simple batchnorm2D gradients, 2x2', async () => {
    const x = tf.tensor2d([2, 4, 9, 23], [2, 2]);
    const mean = tf.tensor1d([1, 2]);
    const variance = tf.tensor1d([2, 3]);
    const offset = tf.tensor1d([3, 4]);
    const scale = tf.tensor1d([2, 5]);
    const varianceEpsilon = .001;

    const dy = tf.tensor2d([1, 1, 1, 1], [2, 2]);
    const [gradX, gradMean, gradVariance, gradOffset, gradScale] = tf.grads(
        (x, mean, variance, offset, scale) => tf.batchNorm2d(
            x, mean, variance, offset, scale,
            varianceEpsilon))([x, mean, variance, offset, scale], dy);
    expectArraysClose(await gradX.data(), [1.414, 2.887, 1.414, 2.887]);
    expect(gradX.shape).toEqual([2, 2]);
    expectArraysClose(await gradMean.data(), [-2.828, -5.773]);
    expect(gradMean.shape).toEqual([2]);
    expectArraysClose(await gradVariance.data(), [-3.180, -11.060]);
    expect(gradVariance.shape).toEqual([2]);
    expectArraysClose(await gradOffset.data(), [2, 2]);
    expect(gradOffset.shape).toEqual([2]);
    expectArraysClose(await gradScale.data(), [6.362, 13.277]);
    expect(gradScale.shape).toEqual([2]);
  });

  it('gradient with clones batchnorm2D', async () => {
    const x = tf.tensor2d([2, 4, 9, 23], [2, 2]);
    const mean = tf.tensor1d([1, 2]);
    const variance = tf.tensor1d([2, 3]);
    const offset = tf.tensor1d([3, 4]);
    const scale = tf.tensor1d([2, 5]);
    const varianceEpsilon = .001;

    const dy = tf.tensor2d([1, 1, 1, 1], [2, 2]);
    const [gradX, gradMean, gradVariance, gradOffset, gradScale] = tf.grads(
        (x, mean, variance, offset, scale) =>
            tf.batchNorm2d(
                  x.clone(), mean.clone(), variance.clone(), offset.clone(),
                  scale.clone(), varianceEpsilon)
                .clone())([x, mean, variance, offset, scale], dy);
    expectArraysClose(await gradX.data(), [1.414, 2.887, 1.414, 2.887]);
    expect(gradX.shape).toEqual([2, 2]);
    expectArraysClose(await gradMean.data(), [-2.828, -5.773]);
    expect(gradMean.shape).toEqual([2]);
    expectArraysClose(await gradVariance.data(), [-3.180, -11.060]);
    expect(gradVariance.shape).toEqual([2]);
    expectArraysClose(await gradOffset.data(), [2, 2]);
    expect(gradOffset.shape).toEqual([2]);
    expectArraysClose(await gradScale.data(), [6.362, 13.277]);
    expect(gradScale.shape).toEqual([2]);
  });
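  // The 'with clones' tests in this suite rerun the same assertions with
  // clone() wrapped around every input and around the output, checking that
  // gradients still flow through the intervening identity ops.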
  it('batchnorm2D gradients, same shapes in x, mean and variance', async () => {
    const x = tf.tensor2d([10, 20, 30, 40], [2, 2]);
    const mean = tf.tensor2d([0, 5, 10, 15], [2, 2]);
    const variance = tf.tensor2d([2, 4, 6, 8], [2, 2]);
    const scale = tf.tensor2d([2, 5, 2, 5], [2, 2]);
    const offset = tf.tensor2d([0, 0, 0, 0], [2, 2]);
    const varianceEpsilon = .001;

    const dy = tf.tensor2d([1, 1, 1, 1], [2, 2]);
    const gradX = tf.grad(
        (x) => tf.batchNorm2d(
            x, mean, variance, offset, scale, varianceEpsilon))(x, dy);
    expectArraysClose(await gradX.data(), [1.414, 2.500, 0.816, 1.768]);
    expect(gradX.shape).toEqual([2, 2]);
    const gradMean = tf.grad(
        (mean) => tf.batchNorm2d(
            x, mean, variance, offset, scale, varianceEpsilon))(mean, dy);
    expectArraysClose(await gradMean.data(), [-1.414, -2.500, -0.816, -1.768]);
    expect(gradMean.shape).toEqual([2, 2]);
    const gradVariance = tf.grad(
        (variance) => tf.batchNorm2d(
            x, mean, variance, offset, scale, varianceEpsilon))(variance, dy);
    expectArraysClose(
        await gradVariance.data(), [-3.533, -4.686, -1.360, -2.762]);
    expect(gradVariance.shape).toEqual([2, 2]);
    const gradOffset = tf.grad(
        (offset) => tf.batchNorm2d(
            x, mean, variance, offset, scale, varianceEpsilon))(offset, dy);
    expectArraysClose(await gradOffset.data(), [1, 1, 1, 1]);
    expect(gradOffset.shape).toEqual([2, 2]);
    const gradScale = tf.grad(
        (scale) => tf.batchNorm2d(
            x, mean, variance, offset, scale, varianceEpsilon))(scale, dy);
    expectArraysClose(await gradScale.data(), [7.069, 7.499, 8.164, 8.838]);
    expect(gradScale.shape).toEqual([2, 2]);
  });

  it('gradient with clones', () => {
    const x = tf.zeros([2, 2]);
    const mean = tf.zeros([2, 2]);
    const variance = tf.zeros([2, 2]);
    const scale = tf.zeros([2, 2]);
    const offset = tf.zeros([2, 2]);
    const varianceEpsilon = .001;

    const gradF = tf.grads(
        (x, mean, variance, offset, scale) =>
            tf.batchNorm2d(
                  x.clone(), mean.clone(), variance.clone(), offset.clone(),
                  scale.clone(), varianceEpsilon)
                .clone());
    const [gradX, gradMean, gradVariance, gradOffset, gradScale] =
        gradF([x, mean, variance, offset, scale]);
    expect(gradX.shape).toEqual(x.shape);
    expect(gradMean.shape).toEqual(mean.shape);
    expect(gradVariance.shape).toEqual(variance.shape);
    expect(gradOffset.shape).toEqual(offset.shape);
    expect(gradScale.shape).toEqual(scale.shape);
  });

  it('batchnorm2D matches tensorflow, 3x3', async () => {
    const x = tf.tensor2d(
        [
          0.3136892, 0.92389025, 0.594782, 0.05021042, 0.67545404, 0.93910035,
          0.13277993, 0.96474269, 0.88608916
        ],
        [3, 3]);
    const mean = tf.tensor1d([0.19526312, 0.74857256, 0.45166398]);
    const variance = tf.tensor1d([0.22963001, 0.61521992, 0.46623685]);
    const offset = tf.tensor1d([0.43098484, 0.77712237, 0.47916298]);
    const scale = tf.tensor1d([0.62186907, 0.85673736, 0.19201061]);
    const varianceEpsilon = .001;

    const result =
        tf.batchNorm2d(x, mean, variance, offset, scale, varianceEpsilon);

    expectArraysClose(await result.data(), [
      0.58433646, 0.96846228, 0.51936529, 0.24315402, 0.69732157, 0.61608542,
      0.35007446, 1.01304821, 0.60119441
    ]);
  });

  it('throws when passed x as a non-tensor', () => {
    const mean = tf.tensor1d([1, 2]);
    const variance = tf.tensor1d([2, 3]);

    expect(() => tf.batchNorm({}, mean, variance))
        .toThrowError(/Argument 'x' passed to 'batchNorm' must be a Tensor/);
  });

  it('throws when passed mean as a non-tensor', () => {
    const x = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const variance = tf.tensor1d([2, 3]);

    expect(() => tf.batchNorm(x, {}, variance))
        .toThrowError(/Argument 'mean' passed to 'batchNorm' must be a Tensor/);
  });

  it('throws when passed variance as a non-tensor', () => {
    const x = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const mean = tf.tensor1d([1, 2]);

    const e = /Argument 'variance' passed to 'batchNorm' must be a Tensor/;
    expect(() => tf.batchNorm(x, mean, {})).toThrowError(e);
  });

  it('throws when passed scale as a non-tensor', () => {
    const x = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const mean = tf.tensor1d([1, 2]);
    const variance = tf.tensor1d([2, 3]);
    const epsilon = .001;

    expect(() => tf.batchNorm(x, mean, variance, epsilon, {}))
        .toThrowError(
            /Argument 'scale' passed to 'batchNorm' must be a Tensor/);
  });

  it('throws when passed offset as a non-tensor', () => {
    const x = tf.tensor4d([2, 4, 9, 23], [2, 1, 1, 2]);
    const mean = tf.tensor1d([1, 2]);
    const variance = tf.tensor1d([2, 3]);
    const epsilon = .001;
    const scale = tf.tensor1d([0.62186907, 0.85673736, 0.19201061]);

    const e = /Argument 'offset' passed to 'batchNorm' must be a Tensor/;
    expect(() => tf.batchNorm(x, mean, variance, {}, scale, epsilon))
        .toThrowError(e);
  });
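  // batchNorm also accepts plain nested arrays and numbers (tensor-like
  // values), converting them to tensors internally, as the next test
  // verifies.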
  it('accepts a tensor-like object', async () => {
    const x = [[2, 4], [9, 23]];
    const mean = [1, 2];
    const variance = [2, 3];
    const offset = [3, 4];
    const scale = [4, 5];
    const varianceEpsilon = .001;

    const result =
        tf.batchNorm2d(x, mean, variance, offset, scale, varianceEpsilon);

    expectArraysClose(await result.data(), [
      offset[0] +
          (x[0][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[0][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon),
      offset[0] +
          (x[1][0] - mean[0]) * scale[0] /
              Math.sqrt(variance[0] + varianceEpsilon),
      offset[1] +
          (x[1][1] - mean[1]) * scale[1] /
              Math.sqrt(variance[1] + varianceEpsilon)
    ]);
  });

  it('throws error when x is a string tensor', () => {
    const mean = [1, 2];
    const variance = [2, 3];
    const offset = [3, 4];
    const scale = [4, 5];
    const varianceEpsilon = .001;

    const f = () => tf.batchNorm2d(
        [['a', 'b'], ['c', 'd']], mean, variance, offset, scale,
        varianceEpsilon);
    expect(f).toThrowError(
        /Argument 'x' passed to 'batchNorm' must be numeric/);
  });

  it('throws error when mean is a string tensor', () => {
    const x = [[2, 4], [9, 23]];
    const variance = [2, 3];
    const offset = [3, 4];
    const scale = [4, 5];
    const varianceEpsilon = .001;

    const f = () =>
        tf.batchNorm2d(x, ['a', 'b'], variance, offset, scale, varianceEpsilon);
    expect(f).toThrowError(
        /Argument 'mean' passed to 'batchNorm' must be numeric/);
  });

  it('throws error when variance is a string tensor', () => {
    const x = [[2, 4], [9, 23]];
    const mean = [1, 2];
    const offset = [3, 4];
    const scale = [4, 5];
    const varianceEpsilon = .001;

    const f = () =>
        tf.batchNorm2d(x, mean, ['a', 'b'], offset, scale, varianceEpsilon);
    expect(f).toThrowError(/'variance' passed to 'batchNorm' must be numeric/);
  });

  it('throws error when scale is a string tensor', () => {
    const x = [[2, 4], [9, 23]];
    const mean = [1, 2];
    const variance = [2, 3];
    const offset = [3, 4];
    const varianceEpsilon = .001;

    const f = () =>
        tf.batchNorm2d(x, mean, variance, offset, ['a', 'b'], varianceEpsilon);
    expect(f).toThrowError(/'scale' passed to 'batchNorm' must be numeric/);
  });

  it('throws error when offset is a string tensor', () => {
    const x = [[2, 4], [9, 23]];
    const mean = [1, 2];
    const variance = [2, 3];
    const scale = [4, 5];
    const varianceEpsilon = .001;

    const f = () =>
        tf.batchNorm2d(x, mean, variance, ['a', 'b'], scale, varianceEpsilon);
    expect(f).toThrowError(/'offset' passed to 'batchNorm' must be numeric/);
  });
});