@tensorflow/tfjs-layers
Version:
TensorFlow layers API in JavaScript
987 lines • 115 kB
JavaScript
/**
* @license
* Copyright 2018 Google LLC
*
* Use of this source code is governed by an MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT.
* =============================================================================
*/
/**
* TensorFlow.js Layers: Merge Layers.
*/
import * as tfc from '@tensorflow/tfjs-core';
import { serialization, tidy, util } from '@tensorflow/tfjs-core';
import * as K from '../backend/tfjs_backend';
import { Layer } from '../engine/topology';
import { NotImplementedError, ValueError } from '../errors';
import { l2Normalize } from '../losses';
import * as generic_utils from '../utils/generic_utils';
import * as mathUtils from '../utils/math_utils';
import { getExactlyOneShape } from '../utils/types_utils';
/**
* Generic Merge layer for element-wise merge functions.
*
* Used to implement `Sum`, `Average`, `Concatenate`, etc.
*/
/**
 * Generic Merge layer for element-wise merge functions.
 *
 * Used to implement `Sum`, `Average`, `Concatenate`, etc.
 */
export class Merge extends Layer {
  /**
   * @param args Standard `Layer` configuration (may be null/undefined).
   */
  constructor(args) {
    super(args || {});
    // Merge layers propagate input masks by default (see `computeMask`).
    this.supportsMasking = true;
  }

  /**
   * Logic for merging multiple tensors, to be overridden by subclasses.
   *
   * @param inputs An Array of tensors to merge (already reshaped to
   *   compatible ranks by `call` when `reshapeRequired` is true).
   * @returns The merged tensor.
   * @throws NotImplementedError Always, in this base class.
   */
  mergeFunction(inputs) {
    throw new NotImplementedError();
  }

  /**
   * Computes the shape of the result of an elementwise operation.
   *
   * @param shape1 Shape of the first tensor.
   * @param shape2 Shape of the second tensor.
   * @returns Expected output shape when an elementwise operation is carried
   *   out on 2 tensors with shapes `shape1` and `shape2`, or `null` if either
   *   input shape is `null` (fully unknown).
   * @throws ValueError If `shape1` and `shape2` are not compatible for
   *   element-wise operations (i.e., not broadcastable).
   */
  computeElementwiseOpOutputShape(shape1, shape2) {
    if (shape1 == null || shape2 == null) {
      return null;
    } else if (shape1.length < shape2.length) {
      // Normalize so that shape1 is always the higher-rank shape.
      return this.computeElementwiseOpOutputShape(shape2, shape1);
    } else if (shape2.length === 0) {
      // Scalar second operand: result takes the first operand's shape.
      return shape1;
    }
    // Leading dimensions of shape1 (beyond shape2's rank) pass through.
    const outputShape = shape1.slice(0, shape1.length - shape2.length);
    for (let k = 0; k < shape2.length; ++k) {
      const i = shape1[shape1.length - shape2.length + k];
      const j = shape2[k];
      if (i == null || j == null || i < 0 || j < 0) {
        // Either dimension unknown => output dimension unknown.
        outputShape.push(null);
      } else if (i === 1) {
        // Broadcasting: a dimension of size 1 takes the other side's size.
        outputShape.push(j);
      } else if (j === 1) {
        outputShape.push(i);
      } else {
        if (i !== j) {
          throw new ValueError(
              'Operands could not be broadcast together with shapes ' +
              JSON.stringify(shape1) + ' ' + JSON.stringify(shape2));
        }
        outputShape.push(i);
      }
    }
    return outputShape;
  }

  build(inputShape) {
    // Used purely for shape validation.
    if (Array.isArray(inputShape) && !Array.isArray(inputShape[0])) {
      // Make sure that inputShape is an Array of shape.
      inputShape = [getExactlyOneShape(inputShape)];
    }
    if (inputShape.length < 2) {
      throw new ValueError(
          'A merge layer should be called on an Array of at least 2 inputs.' +
          ` Got ${inputShape.length} input(s).`);
    }
    // Make sure that there is at most one unique batch size among the input
    // shapes.
    let batchSizes = [];
    for (const shape of inputShape) {
      if (shape != null && shape[0] !== null) {
        batchSizes.push(shape[0]);
      }
    }
    batchSizes = generic_utils.unique(batchSizes);
    if (batchSizes.length > 1) {
      throw new ValueError(
          `Can not merge tensors with different batch sizes. ` +
          `Got tensors with shapes: ${JSON.stringify(inputShape)}.`);
    }
    // Fold per-input shapes (minus the batch dimension) together; this
    // throws ValueError if any pair is not broadcast-compatible. The
    // resulting shape itself is not used here — this is validation only.
    let outputShape = inputShape[0] == null ? null : inputShape[0].slice(1);
    for (let i = 1; i < inputShape.length; ++i) {
      const shape = inputShape[i] == null ? null : inputShape[i].slice(1);
      outputShape = this.computeElementwiseOpOutputShape(outputShape, shape);
    }
    // If the inputs have different ranks, we have to reshape them to make
    // them broadcastable at call time.
    const allRanks = inputShape.map(shape => shape.length);
    this.reshapeRequired = !(inputShape.indexOf(null) === -1 &&
        generic_utils.unique(allRanks).length === 1);
  }

  call(inputs, kwargs) {
    return tidy(() => {
      if (this.reshapeRequired) {
        const reshapedInputs = [];
        const inputDims = inputs.map(input => input.rank);
        if (inputDims.indexOf(null) === -1) {
          // If ranks of all inputs are available, we simply expand each of
          // them at axis=1 until all of them have the same rank.
          const maxNDim = mathUtils.max(inputDims);
          for (let x of inputs) {
            const xNDim = x.rank;
            for (let k = 0; k < maxNDim - xNDim; ++k) {
              x = K.expandDims(x, 1);
            }
            reshapedInputs.push(x);
          }
          return this.mergeFunction(reshapedInputs);
        } else {
          // Transpose all inputs so that batch size is the last dimension.
          // [batchSize, dim1, dim2, ...] -> [dim1, dim2, ..., batchSize]
          let transposed = false;
          for (const x of inputs) {
            const xNDim = x.rank;
            if (xNDim == null) {
              // Rank unknown: flatten to 2D, swap the batch axis to the
              // back, then restore the remaining dimensions.
              const xShape = x.shape;
              const batchSize = xShape[0];
              const newShape = xShape.slice(1).concat([batchSize]);
              let xTransposed = tfc.reshape(
                  x,
                  [batchSize].concat(mathUtils.arrayProd(xShape.slice(1))));
              xTransposed = tfc.transpose(xTransposed, [1, 0]);
              xTransposed = tfc.reshape(xTransposed, newShape);
              reshapedInputs.push(xTransposed);
              transposed = true;
            } else if (xNDim > 1) {
              const dims = mathUtils.range(1, xNDim).concat([0]);
              reshapedInputs.push(tfc.transpose(x, dims));
              transposed = true;
            } else {
              // We don't transpose inputs if they are 1D vectors or scalars.
              reshapedInputs.push(x);
            }
          }
          let y = this.mergeFunction(reshapedInputs);
          const yNDim = y.rank;
          if (transposed) {
            // If inputs have been transposed, we have to transpose the
            // output too.
            if (yNDim == null) {
              const yShape = y.shape;
              const yNDim = yShape.length;
              const batchSize = yShape[yNDim - 1];
              const newShape =
                  [batchSize].concat(yShape.slice(0, yShape.length - 1));
              y = tfc.reshape(
                  tfc.transpose(tfc.reshape(y, [-1, batchSize]), [1, 0]),
                  newShape);
            } else if (yNDim > 1) {
              const dims = [yNDim - 1].concat(mathUtils.range(0, yNDim - 1));
              y = tfc.transpose(y, dims);
            }
          }
          return y;
        }
      } else {
        return this.mergeFunction(inputs);
      }
    });
  }

  computeOutputShape(inputShape) {
    // Fold the non-batch dimensions of all inputs with broadcasting rules.
    let outputShape;
    if (inputShape[0] == null) {
      outputShape = null;
    } else {
      outputShape = inputShape[0].slice(1);
    }
    for (let i = 1; i < inputShape.length; ++i) {
      const shape = inputShape[i] == null ? null : inputShape[i].slice(1);
      outputShape = this.computeElementwiseOpOutputShape(outputShape, shape);
    }
    // Prepend the batch dimension: the unique known batch size if there is
    // exactly one, otherwise `null` (unknown).
    let batchSizes = [];
    for (const shape of inputShape) {
      if (shape != null && shape[0] !== null) {
        batchSizes.push(shape[0]);
      }
    }
    batchSizes = generic_utils.unique(batchSizes);
    if (batchSizes.length === 1) {
      outputShape = batchSizes.concat(outputShape);
    } else {
      outputShape = [null].concat(outputShape);
    }
    return outputShape;
  }

  computeMask(inputs, mask) {
    return tfc.tidy(() => {
      if (mask == null) {
        return null;
      }
      if (!Array.isArray(mask)) {
        throw new ValueError('`mask` should be an Array');
      }
      if (!Array.isArray(inputs)) {
        throw new ValueError('`inputs` should be an Array');
      }
      if (mask.length !== inputs.length) {
        throw new ValueError(
            `The Array 'inputs' and 'mask' are expected to have the same ` +
            `length, but have different lengths ` +
            `(${inputs.length} vs ${mask.length})`);
      }
      if (mask.every(m => m == null)) {
        return null;
      }
      // Bug fix: the original loop ran `i < mask.length - 1`, silently
      // dropping the final input's mask, and would pass `null` to
      // `logicalAnd` when only some masks were null. Filter out the null
      // (unmasked) entries and AND all remaining masks together.
      const expandedMasks =
          mask.filter(m => m != null).map(m => tfc.expandDims(m, 0));
      let output = expandedMasks[0];
      for (let i = 1; i < expandedMasks.length; ++i) {
        output = tfc.logicalAnd(output, expandedMasks[i]);
      }
      return output;
    });
  }
}
/**
 * Layer that computes the element-wise sum of its inputs.
 *
 * All inputs are expected to have compatible (broadcastable) shapes.
 */
class Add extends Merge {
  constructor(args) {
    super(args);
  }

  /**
   * Sums all input tensors element-wise.
   * @param inputs Array of tensors to be summed.
   * @returns A new tensor holding the element-wise sum.
   */
  mergeFunction(inputs) {
    return tidy(() => {
      const [first, ...rest] = inputs;
      return rest.reduce((runningSum, t) => tfc.add(runningSum, t),
                         first.clone());
    });
  }
}
/** @nocollapse */
Add.className = 'Add';
export { Add };
serialization.registerClass(Add);
/**
* Calculate the element-wise sum of inputs, which all have the same shape.
*
* This function can be invoked in three ways.
*
* 1. Construct an instance of `Add` layer, by using no input argument
* or a single configuration argument. The resultant `Add` layer can then
* be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:
*
* ```js
* const addLayer = tf.layers.add();
*
* // The layer can be applied to inputs.
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = addLayer.apply([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.SymbolicTensor`. For example:
*
* ```js
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = tf.layers.add([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.Tensor` as the result of the computation. For
* example:
*
* ```js
* const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]);
* const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]);
* tf.layers.add([input1, input2]).print();
* // Gives [[11, 22], [33, 44]].
* ```
*/
export function add(config) {
  // An Array argument means "apply directly to these inputs"; otherwise
  // `config` is a layer configuration object (possibly undefined).
  if (Array.isArray(config)) {
    return new Add({}).apply(config);
  }
  return new Add(config);
}
/**
 * Layer that computes the element-wise product of its inputs.
 *
 * All inputs are expected to have compatible (broadcastable) shapes.
 */
class Multiply extends Merge {
  constructor(args) {
    super(args);
  }

  /**
   * Multiplies all input tensors element-wise.
   * @param inputs Array of tensors to be multiplied.
   * @returns A new tensor holding the element-wise product.
   */
  mergeFunction(inputs) {
    return tidy(() => {
      const [first, ...rest] = inputs;
      return rest.reduce((runningProduct, t) => tfc.mul(runningProduct, t),
                         first.clone());
    });
  }
}
/** @nocollapse */
Multiply.className = 'Multiply';
export { Multiply };
serialization.registerClass(Multiply);
/**
* Calculate the element-wise product of inputs, which all have the same shape.
*
* This function can be invoked in three ways.
*
* 1. Construct an instance of `Multiply` layer, by using no input argument
* or a single configuration argument. The resultant `Multiply` layer can
* then be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:
*
* ```js
* const multiplyLayer = tf.layers.multiply();
*
* // The layer can be applied to inputs.
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = multiplyLayer.apply([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.SymbolicTensor`. For example:
*
* ```js
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = tf.layers.multiply([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.Tensor` as the result of the computation. For
* example:
*
* ```js
* const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]);
* const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]);
* tf.layers.multiply([input1, input2]).print();
* // Gives [[10, 40], [90, 160]].
* ```
*/
export function multiply(config) {
  // An Array argument means "apply directly to these inputs"; otherwise
  // `config` is a layer configuration object (possibly undefined).
  if (Array.isArray(config)) {
    return new Multiply({}).apply(config);
  }
  return new Multiply(config);
}
/**
 * Layer that computes the element-wise arithmetic mean of its inputs.
 *
 * All inputs are expected to have compatible (broadcastable) shapes.
 */
class Average extends Merge {
  constructor(args) {
    super(args);
  }

  /**
   * Averages all input tensors element-wise.
   * @param inputs Array of tensors to be averaged.
   * @returns A new tensor holding the element-wise mean.
   */
  mergeFunction(inputs) {
    return tidy(() => {
      const [first, ...rest] = inputs;
      const total = rest.reduce((runningSum, t) => tfc.add(runningSum, t),
                                first.clone());
      // Scale the sum by 1/N to obtain the mean.
      return tfc.mul(1 / inputs.length, total);
    });
  }
}
/** @nocollapse */
Average.className = 'Average';
export { Average };
serialization.registerClass(Average);
/**
* Calculate the element-wise arithmetic mean of inputs, which all have the same
* shape.
*
* This function can be invoked in three ways.
*
* 1. Construct an instance of `Average` layer, by using no input argument
* or a single configuration argument. The resultant `Average` layer can then
* be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:
*
* ```js
* const averageLayer = tf.layers.average();
*
* // The layer can be applied to inputs.
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = averageLayer.apply([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.SymbolicTensor`. For example:
*
* ```js
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = tf.layers.average([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.Tensor` as the result of the computation. For
* example:
*
* ```js
* const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]);
* const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]);
* tf.layers.average([input1, input2]).print();
* // Gives [[5.5, 11], [16.5, 22]].
* ```
*/
export function average(config) {
  // An Array argument means "apply directly to these inputs"; otherwise
  // `config` is a layer configuration object (possibly undefined).
  if (Array.isArray(config)) {
    return new Average({}).apply(config);
  }
  return new Average(config);
}
/**
 * Layer that computes the element-wise maximum of its inputs.
 *
 * All inputs are expected to have compatible (broadcastable) shapes.
 */
class Maximum extends Merge {
  constructor(args) {
    super(args);
  }

  /**
   * Takes the element-wise maximum over all input tensors.
   * @param inputs Array of tensors to be compared.
   * @returns A tensor holding the element-wise maximum.
   */
  mergeFunction(inputs) {
    // reduce() without an initial value starts from inputs[0], matching the
    // original accumulation order.
    return tidy(() => inputs.reduce((acc, t) => tfc.maximum(acc, t)));
  }
}
/** @nocollapse */
Maximum.className = 'Maximum';
export { Maximum };
serialization.registerClass(Maximum);
/**
* Calculate the element-wise maximum of inputs, which all have the same shape.
*
* This function can be invoked in three ways.
*
* 1. Construct an instance of `Maximum` layer, by using no input argument
* or a single configuration argument. The resultant `Maximum` layer can then
* be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:
*
* ```js
* const maximumLayer = tf.layers.maximum();
*
* // The layer can be applied to inputs.
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = maximumLayer.apply([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.SymbolicTensor`. For example:
*
* ```js
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = tf.layers.maximum([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.Tensor` as the result of the computation. For
* example:
*
* ```js
* const input1 = tf.tensor2d([1, 20, 3, 40], [2, 2]);
* const input2 = tf.tensor2d([10, 2, 30, 4], [2, 2]);
* tf.layers.maximum([input1, input2]).print();
* // Gives [[10, 20], [30, 40]].
* ```
*/
export function maximum(config) {
  // An Array argument means "apply directly to these inputs"; otherwise
  // `config` is a layer configuration object (possibly undefined).
  if (Array.isArray(config)) {
    return new Maximum({}).apply(config);
  }
  return new Maximum(config);
}
/**
 * Layer that computes the element-wise minimum of its inputs.
 *
 * All inputs are expected to have compatible (broadcastable) shapes.
 */
class Minimum extends Merge {
  constructor(args) {
    super(args);
  }

  /**
   * Takes the element-wise minimum over all input tensors.
   * @param inputs Array of tensors to be compared.
   * @returns A tensor holding the element-wise minimum.
   */
  mergeFunction(inputs) {
    // reduce() without an initial value starts from inputs[0], matching the
    // original accumulation order.
    return tidy(() => inputs.reduce((acc, t) => tfc.minimum(acc, t)));
  }
}
/** @nocollapse */
Minimum.className = 'Minimum';
export { Minimum };
serialization.registerClass(Minimum);
/**
* Calculate the element-wise minimum of inputs, which all have the same shape.
*
* This function can be invoked in three ways.
*
* 1. Construct an instance of `Minimum` layer, by using no input argument
* or a single configuration argument. The resultant `Minimum` layer can then
* be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:
*
* ```js
* const minimumLayer = tf.layers.minimum();
*
* // The layer can be applied to inputs.
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = minimumLayer.apply([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.SymbolicTensor`. For example:
*
* ```js
* const input1 = tf.input({shape: [2, 2]});
* const input2 = tf.input({shape: [2, 2]});
* const output = tf.layers.minimum([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 2], with the first dimension as the undetermined batch
* // dimension.
* ```
*
* 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.Tensor` as the result of the computation. For
* example:
*
* ```js
* const input1 = tf.tensor2d([1, 20, 3, 40], [2, 2]);
* const input2 = tf.tensor2d([10, 2, 30, 4], [2, 2]);
* tf.layers.minimum([input1, input2]).print();
* // Gives [[1, 2], [3, 4]].
* ```
*/
export function minimum(config) {
  // An Array argument means "apply directly to these inputs"; otherwise
  // `config` is a layer configuration object (possibly undefined).
  if (Array.isArray(config)) {
    return new Minimum({}).apply(config);
  }
  return new Minimum(config);
}
/**
 * Layer that concatenates an Array of inputs along a given axis.
 *
 * All inputs must have the same shape except along the concatenation axis.
 */
class Concatenate extends Merge {
constructor(args) {
super(args);
// Default concatenation axis: the last axis.
this.DEFAULT_AXIS = -1;
if (args == null) {
args = {};
}
this.axis = args.axis == null ? this.DEFAULT_AXIS : args.axis;
this.supportsMasking = true;
// Inputs must already share a rank; Merge's rank-normalizing reshape
// logic is never needed for Concatenate.
this.reshapeRequired = false;
}
build(inputShape) {
// Used purely for shape validation.
if (!(Array.isArray(inputShape) && Array.isArray(inputShape[0])) ||
inputShape.length === 1) {
throw new ValueError('A `Concatenate` layer should be called on a list of at least 2 ' +
'inputs');
}
inputShape = inputShape;
// If every input shape is unknown, there is nothing to validate.
let allNoneShape = true;
for (const shape of inputShape) {
if (shape != null) {
allNoneShape = false;
break;
}
}
if (allNoneShape) {
return;
}
// Collect the distinct shapes-with-the-concat-axis-removed; all inputs
// must agree on every non-concat dimension.
const shapeSet = [];
for (let i = 0; i < inputShape.length; ++i) {
const shapeWithoutConcatAxis = inputShape[i].slice();
// splice() accepts a negative index, so this.axis may be negative here.
shapeWithoutConcatAxis.splice(this.axis, 1);
let exists = false;
for (const shape of shapeSet) {
if (util.arraysEqual(shape, shapeWithoutConcatAxis)) {
exists = true;
break;
}
}
if (!exists) {
shapeSet.push(shapeWithoutConcatAxis);
}
}
if (shapeSet.length > 1) {
throw new ValueError('A `Concatenate` layer requires inputs with matching shapes ' +
'except for the concat axis. Got input shapes: ' +
JSON.stringify(inputShape));
}
}
// Concatenates the inputs along the configured axis.
mergeFunction(inputs) {
return tidy(() => {
return K.concatenate(inputs, this.axis);
});
}
// Output shape equals the first input's shape, with the concat-axis size
// replaced by the sum of all inputs' sizes along that axis (or null when
// any of them is unknown).
computeOutputShape(inputShape) {
if (!(Array.isArray(inputShape) && Array.isArray(inputShape[0]))) {
throw new ValueError('A `Concatenate` layer should be called on a list of inputs.');
}
const inputShapes = inputShape;
const outputShape = inputShapes[0].slice();
const axis = this.axis < 0 ? outputShape.length + this.axis : this.axis;
// Porting Note: the line above is because TypeScript doesn't support
// negative indices.
for (const shape of inputShapes.slice(1)) {
if (outputShape[axis] == null || shape[axis] == null) {
outputShape[axis] = null;
break;
}
outputShape[axis] += shape[axis];
}
return outputShape;
}
// Combines the per-input masks into a single mask by concatenating them
// (unmasked inputs contribute all-true masks) and AND-reducing over the
// final axis.
computeMask(inputs, mask) {
if (mask == null) {
return null;
}
if (!Array.isArray(mask)) {
throw new ValueError('`mask` should be an array for Concatenate');
}
if (!Array.isArray(inputs)) {
throw new ValueError('`inputs` should be an array for Concatenate');
}
if (mask.length !== inputs.length) {
// NOTE(review): "legnth" is a pre-existing typo in this runtime error
// message; left untouched to preserve behavior byte-for-byte.
throw new ValueError(`Mismatch in the length of mask (${mask.length}) ` +
`and the legnth of inputs (${inputs.length})`);
}
return tfc.tidy(() => {
let allNullMasks = true;
mask.forEach(m => {
if (m != null) {
allNullMasks = false;
return;
}
});
if (allNullMasks) {
return null;
}
const outputMasks = [];
for (let i = 0; i < inputs.length; ++i) {
if (mask[i] == null) {
// Input is unmasked. Append all 1's to masks.
outputMasks.push(tfc.cast(tfc.onesLike(inputs[i]), 'bool'));
}
else if (mask[i].rank < inputs[i].rank) {
// Mask is smaller than the input, expand it.
outputMasks.push(tfc.expandDims(mask[i], -1));
}
else {
outputMasks.push(mask[i]);
}
}
const concatenatedMasks = tfc.concat(outputMasks, this.axis);
return tfc.all(concatenatedMasks, -1, false);
});
}
// Serializes the layer configuration, including the concat axis.
getConfig() {
const config = {
'axis': this.axis,
};
const baseConfig = super.getConfig();
Object.assign(config, baseConfig);
return config;
}
}
/** @nocollapse */
Concatenate.className = 'Concatenate';
export { Concatenate };
serialization.registerClass(Concatenate);
/**
* Concatenate an `Array` of inputs.
*
* This function can be invoked in three ways.
*
* 1. Construct an instance of `Concatenate` layer, by using no input argument
* or a single configuration argument. The resultant `Concatenate` layer can
* then be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:
*
* ```js
* const concatLayer = tf.layers.concatenate();
*
* // The layer can be applied to inputs.
* const input1 = tf.input({shape: [2, 3]});
* const input2 = tf.input({shape: [2, 4]});
* const output = concatLayer.apply([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 7], with the first dimension as the undetermined batch
* // dimension and the last dimension as the result of concatenating the
* // last dimensions of the two inputs.
* ```
*
* 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.SymbolicTensor`. For example:
*
* ```js
* const input1 = tf.input({shape: [2, 3]});
* const input2 = tf.input({shape: [2, 4]});
* const output = tf.layers.concatenate([input1, input2]);
* console.log(output.shape);
* // You get [null, 2, 7], with the first dimension as the undetermined batch
* // dimension and the last dimension as the result of concatenating the
* // last dimensions of the two inputs.
* ```
*
* 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs
* an `Layer` object internally and calls its `apply` method on the inputs,
* generating a new `tf.Tensor` as the result of the computation. For
* example:
*
* ```js
* const input1 = tf.tensor2d([[1, 2], [3, 4]], [2, 2]);
* const input2 = tf.tensor2d([[10, 20], [30, 40]], [2, 2]);
* tf.layers.concatenate([input1, input2]).print();
* // Gives [[1, 2, 10, 20], [3, 4, 30, 40]].
* ```
*/
export function concatenate(config) {
  // An Array argument means "apply directly to these inputs"; otherwise
  // `config` is a layer configuration object (possibly undefined).
  if (Array.isArray(config)) {
    return new Concatenate({}).apply(config);
  }
  return new Concatenate(config);
}
/**
* Interpret a potentially negative axis index as a non-negative one.
*
* For example, given axis = -1, and dim = 3, this function will return 2.
*
* @param axis The axis index, may be a positive, zero or negative integer.
* @param dim Total number of dimensions, a positive integer.
* @returns A non-negative axis index equivalent to the input `axis`.
*/
function interpretAxis(axis, dim) {
  // Repeatedly shift a negative index up by the rank until it is
  // non-negative; non-negative indices (even ones >= dim) pass through
  // unchanged.
  let resolved = axis;
  while (resolved < 0) {
    resolved += dim;
  }
  return resolved;
}
/**
 * Batchwise dot product of two tensors along the given axes.
 *
 * Mirrors the Keras backend `batch_dot`: ranks are equalized by appending
 * singleton dimensions, the contraction is performed via `matMul` (or an
 * elementwise multiply-and-sum for the 2D case), and any padding dimensions
 * are squeezed away afterwards.
 *
 * @param x First input tensor, rank 2 or 3.
 * @param y Second input tensor, rank 2 or 3.
 * @param axes A single axis (applied to both inputs) or a pair
 *   `[xAxis, yAxis]`; defaults to batch-matmul axes when null.
 * @returns The batch dot product; a rank-1 result is expanded to rank 2.
 * @throws NotImplementedError For rank > 3 inputs or complex64 dtypes.
 */
function batchDot(x, y, axes) {
  if (x.shape.length > 3 || y.shape.length > 3) {
    throw new NotImplementedError(
        'batchDot is not implemented for tensors of 4D or higher rank yet');
  }
  tfc.util.assert(
      x.shape.length >= 2,
      () => `batchDot requires the rank of x to be >= 2, ` +
          `but got ${x.shape.length}`);
  // Bug fix: this assertion previously re-checked `x.shape.length` while
  // its message claimed to validate `y`; it now checks `y` as intended.
  tfc.util.assert(
      y.shape.length >= 2,
      () => `batchDot requires the rank of y to be >= 2, ` +
          `but got ${y.shape.length}`);
  if (typeof axes === 'number') {
    // A single integer applies to both operands.
    axes = [axes, axes];
  }
  if (x.dtype === 'complex64' || y.dtype === 'complex64') {
    throw new NotImplementedError(
        'batchDot is not implemented for complex64-type Tensors yet.');
  }
  const xNDim = x.shape.length;
  const yNDim = y.shape.length;
  if (axes == null) {
    // Behave like batchMatmul by default.
    axes = [xNDim - 1, yNDim - 2];
  }
  const axesArray = axes;
  return tfc.tidy(() => {
    // Pad the lower-rank operand with trailing singleton dimensions so
    // both operands have the same rank; `diff` records how many were added.
    let diff;
    if (xNDim > yNDim) {
      diff = xNDim - yNDim;
      const diffShape = [];
      for (let i = 0; i < diff; ++i) {
        diffShape.push(1);
      }
      y = tfc.reshape(y, y.shape.concat(diffShape));
    } else if (yNDim > xNDim) {
      diff = yNDim - xNDim;
      const diffShape = [];
      for (let i = 0; i < diff; ++i) {
        diffShape.push(1);
      }
      x = tfc.reshape(x, x.shape.concat(diffShape));
    } else {
      diff = 0;
    }
    let out;
    if (x.shape.length === 2 && y.shape.length === 2) {
      // Pure 2D case: contract by elementwise multiply + sum.
      if (axesArray[0] === axesArray[1]) {
        out = tfc.sum(tfc.mul(x, y), axesArray[0]);
      } else {
        out = tfc.sum(tfc.mul(tfc.transpose(x, [1, 0]), y), axesArray[1]);
      }
    } else {
      // Batched case: let matMul handle the contraction, transposing the
      // inner dimensions as needed to line up the requested axes.
      const adjX = axesArray[0] !== x.shape.length - 1;
      const adjY = axesArray[1] === y.shape.length - 1;
      out = tfc.matMul(x, y, adjX, adjY);
    }
    // Squeeze away the singleton dimensions introduced by the rank padding.
    if (diff > 0) {
      let idx;
      if (xNDim > yNDim) {
        idx = xNDim + yNDim - 3;
      } else {
        idx = xNDim - 1;
      }
      const squeezeAxes = [];
      for (let i = idx; i < idx + diff; ++i) {
        squeezeAxes.push(i);
      }
      out = tfc.squeeze(out, squeezeAxes);
    }
    if (out.shape.length === 1) {
      out = tfc.expandDims(out, 1);
    }
    return out;
  });
}
/**
 * Layer that computes the dot product between samples in two tensors.
 *
 * Supports rank-2 and rank-3 inputs; the contraction axes are configured
 * via `args.axes` and inputs may optionally be L2-normalized first.
 */
class Dot extends Merge {
constructor(args) {
super(args);
// Axis (or pair of axes) along which to take the dot product.
this.axes = args.axes;
// When true, L2-normalize each input along its dot axis before
// multiplying, yielding cosine similarity.
this.normalize = args.normalize == null ? false : args.normalize;
this.supportsMasking = true;
this.reshapeRequired = false;
}
build(inputShape) {
// Used purely for shape validation: exactly two inputs of rank <= 3
// whose sizes agree along the contraction axes.
tfc.util.assert(Array.isArray(inputShape) && inputShape.length === 2 &&
Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]), () => 'A `Dot` layer should be called on a list of exactly 2 inputs.');
const shape1 = inputShape[0];
const shape2 = inputShape[1];
if (shape1.length > 3 || shape2.length > 3) {
throw new NotImplementedError('Dot layer does not support tensors of 4D or higher rank yet.');
}
const axes = this.interpretAxes(shape1, shape2);
if (shape1[axes[0]] !== shape2[axes[1]]) {
throw new ValueError(`Dimension incompatibility: ` +
`${shape1[axes[0]]} !== ${shape2[axes[1]]}`);
}
}
// Computes the batch dot product of exactly two input tensors, optionally
// L2-normalizing each along its contraction axis first.
mergeFunction(inputs) {
if (inputs.length !== 2) {
throw new ValueError('A `Dot` layer must be called on exactly 2 inputs, ' +
`but received ${inputs.length} input(s).`);
}
let x1 = inputs[0];
let x2 = inputs[1];
let axes;
if (!Array.isArray(this.axes)) {
// `this.axes` is a single integer: resolve it against each input's rank.
axes = [
interpretAxis(this.axes, x1.shape.length),
interpretAxis(this.axes, x2.shape.length)
];
}
else {
axes = this.axes.map((axis, i) => interpretAxis(axis, inputs[i].shape.length));
}
if (this.normalize) {
x1 = l2Normalize(x1, axes[0]);
x2 = l2Normalize(x2, axes[1]);
}
return batchDot(x1, x2, axes);
}
// Resolves `this.axes` into a concrete [xAxis, yAxis] pair for the given
// input shapes. Note: unlike mergeFunction, an Array `this.axes` is
// returned as-is here (negative entries are not resolved).
interpretAxes(shape1, shape2) {
let axes;
if (!Array.isArray(this.axes)) {
// `this.axes` is a single integer.
axes = [
interpretAxis(this.axes, shape1.length),
interpretAxis(this.axes, shape2.length)
];
}
else {
// `this.axes` is an Array of integers.
axes = this.axes;
}
return axes;
}
// Output shape: shape1 with its contraction axis removed, concatenated
// with shape2 minus both its contraction axis and its batch dimension.
computeOutputShape(inputShape) {
tfc.util.assert(Array.isArray(inputShape) && inputShape.length === 2 &&
Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]), () => 'A `Dot` layer should be called on a list of exactly 2 inputs.');
const shape1 = inputShape[0].slice();
const shape2 = inputShape[1].slice();
if (shape1.length > 3 || shape2.length > 3) {
throw new NotImplementedError('Dot layer does not support tensors of 4D or higher rank yet.');
}
const axes = this.interpretAxes(shape1, shape2);
shape1.splice(axes[0], 1);
shape2.splice(axes[1], 1);
// Drop shape2's batch dimension; it is shared with shape1's.
shape2.splice(0, 1);
const outputShape = shape1.concat(shape2);
if (outputShape.length === 1) {
outputShape.push(1);
}
return outputShape;
}
// Dot does not propagate masks.
computeMask(inputs, mask) {
return null;
}
// Serializes the layer configuration (axes and normalize flag).
getConfig() {
const config = {
'axes': this.axes,
'normalize': this.normalize
};
const baseConfig = super.getConfig();
Object.assign(config, baseConfig);
return config;
}
}
/** @nocollapse */
Dot.className = 'Dot';
export { Dot };
serialization.registerClass(Dot);
// TODO(cais): Add functional interfaces for the merge layers.
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWVyZ2UuanMiLCJzb3VyY2VSb290IjoiIiwic291cmNlcyI6WyIuLi8uLi8uLi8uLi8uLi8uLi90ZmpzLWxheWVycy9zcmMvbGF5ZXJzL21lcmdlLnRzIl0sIm5hbWVzIjpbXSwibWFwcGluZ3MiOiJBQUFBOzs7Ozs7OztHQVFHO0FBRUg7O0dBRUc7QUFFSCxPQUFPLEtBQUssR0FBRyxNQUFNLHVCQUF1QixDQUFDO0FBQzdDLE9BQU8sRUFBQyxhQUFhLEVBQVUsSUFBSSxFQUFFLElBQUksRUFBQyxNQUFNLHVCQUF1QixDQUFDO0FBQ3hFLE9BQU8sS0FBSyxDQUFDLE1BQU0seUJBQXlCLENBQUM7QUFDN0MsT0FBTyxFQUFDLEtBQUssRUFBNEIsTUFBTSxvQkFBb0IsQ0FBQztBQUNwRSxPQUFPLEVBQUMsbUJBQW1CLEVBQUUsVUFBVSxFQUFDLE1BQU0sV0FBVyxDQUFDO0FBRTFELE9BQU8sRUFBQyxXQUFXLEVBQUMsTUFBTSxXQUFXLENBQUM7QUFFdEMsT0FBTyxLQUFLLGFBQWEsTUFBTSx3QkFBd0IsQ0FBQztBQUN4RCxPQUFPLEtBQUssU0FBUyxNQUFNLHFCQUFxQixDQUFDO0FBQ2pELE9BQU8sRUFBQyxrQkFBa0IsRUFBQyxNQUFNLHNCQUFzQixDQUFDO0FBRXhEOzs7O0dBSUc7QUFDSCxNQUFNLE9BQWdCLEtBQU0sU0FBUSxLQUFLO0lBR3ZDLFlBQVksSUFBZ0I7UUFDMUIsS0FBSyxDQUFDLElBQUksSUFBSSxFQUFFLENBQUMsQ0FBQztRQUNsQixJQUFJLENBQUMsZUFBZSxHQUFHLElBQUksQ0FBQztJQUM5QixDQUFDO0lBRUQ7OztPQUdHO0lBQ08sYUFBYSxDQUFDLE1BQWdCO1FBQ3RDLE1BQU0sSUFBSSxtQkFBbUIsRUFBRSxDQUFDO0lBQ2xDLENBQUM7SUFFRDs7Ozs7Ozs7O09BU0c7SUFDSywrQkFBK0IsQ0FBQyxNQUFhLEVBQUUsTUFBYTtRQUNsRSxJQUFJLE1BQU0sSUFBSSxJQUFJLElBQUksTUFBTSxJQUFJLElBQUksRUFBRTtZQUNwQyxPQUFPLElBQUksQ0FBQztTQUNiO2FBQU0sSUFBSSxNQUFNLENBQUMsTUFBTSxHQUFHLE1BQU0sQ0FBQyxNQUFNLEVBQUU7WUFDeEMsT0FBTyxJQUFJLENBQUMsK0JBQStCLENBQUMsTUFBTSxFQUFFLE1BQU0sQ0FBQyxDQUFDO1NBQzdEO2FBQU0sSUFBSSxNQUFNLENBQUMsTUFBTSxLQUFLLENBQUMsRUFBRTtZQUM5QixPQUFPLE1BQU0sQ0FBQztTQUNmO1FBQ0QsTUFBTSxXQUFXLEdBQVUsTUFBTSxDQUFDLEtBQUssQ0FBQyxDQUFDLEVBQUUsTUFBTSxDQUFDLE1BQU0sR0FBRyxNQUFNLENBQUMsTUFBTSxDQUFDLENBQUM7UUFDMUUsS0FBSyxJQUFJLENBQUMsR0FBRyxDQUFDLEVBQUUsQ0FBQyxHQUFHLE1BQU0sQ0FBQyxNQUFNLEVBQUUsRUFBRSxDQUFDLEVBQUU7WUFDdEMsTUFBTSxDQUFDLEdBQUcsTUFBTSxDQUFDLE1BQU0sQ0FBQyxNQUFNLEdBQUcsTUFBTSxDQUFDLE1BQU0sR0FBRyxDQUFDLENBQUMsQ0FBQztZQUNwRCxNQUFNLENBQUMsR0FBRyxNQUFNLENBQUMsQ0FBQyxDQUFDLENBQUM7WUFDcEIsSUFBSSxDQUFDLElBQUksSUFBSSxJQUFJLENBQUMsSUFBSSxJQUFJLElBQUksQ0FBQyxHQUFHLE
NBQUMsSUFBSSxDQUFDLEdBQUcsQ0FBQyxFQUFFO2dCQUM1QyxXQUFXLENBQUMsSUFBSSxDQUFDLElBQUksQ0FBQyxDQUFDO2FBQ3hCO2lCQUFNLElBQUksQ0FBQyxLQUFLLENBQUMsRUFBRTtnQkFDbEIsV0FBVyxDQUFDLElBQUksQ0FBQyxDQUFDLENBQUMsQ0FBQzthQUNyQjtpQkFBTSxJQUFJLENBQUMsS0FBSyxDQUFDLEVBQUU7Z0JBQ2xCLFdBQVcsQ0FBQyxJQUFJLENBQUMsQ0FBQyxDQUFDLENBQUM7YUFDckI7aUJBQU07Z0JBQ0wsSUFBSSxDQUFDLEtBQUssQ0FBQyxFQUFFO29CQUNYLE1BQU0sSUFBSSxVQUFVLENBQ2hCLHVEQUF1RDt3QkFDdkQsSUFBSSxDQUFDLFNBQVMsQ0FBQyxNQUFNLENBQUMsR0FBRyxHQUFHLEdBQUcsSUFBSSxDQUFDLFNBQVMsQ0FBQyxNQUFNLENBQUMsQ0FBQyxDQUFDO2lCQUM1RDtnQkFDRCxXQUFXLENBQUMsSUFBSSxDQUFDLENBQUMsQ0FBQyxDQUFDO2FBQ3JCO1NBQ0Y7UUFDRCxPQUFPLFdBQVcsQ0FBQztJQUNyQixDQUFDO0lBRVEsS0FBSyxDQUFDLFVBQXlCO1FBQ3RDLG9DQUFvQztRQUNwQyxJQUFJLEtBQUssQ0FBQyxPQUFPLENBQUMsVUFBVSxDQUFDLElBQUksQ0FBQyxLQUFLLENBQUMsT0FBTyxDQUFDLFVBQVUsQ0FBQyxDQUFDLENBQUMsQ0FBQyxFQUFFO1lBQzlELGtEQUFrRDtZQUNsRCxVQUFVLEdBQUcsQ0FBQyxrQkFBa0IsQ0FBQyxVQUFVLENBQUMsQ0FBQyxDQUFDO1NBQy9DO1FBQ0QsVUFBVSxHQUFHLFVBQXFCLENBQUM7UUFDbkMsSUFBSSxVQUFVLENBQUMsTUFBTSxHQUFHLENBQUMsRUFBRTtZQUN6QixNQUFNLElBQUksVUFBVSxDQUNoQixrRUFBa0U7Z0JBQ2xFLFFBQVEsVUFBVSxDQUFDLE1BQU0sWUFBWSxDQUFDLENBQUM7U0FDNUM7UUFFRCx3RUFBd0U7UUFDeEUsVUFBVTtRQUNWLElBQUksVUFBVSxHQUFhLEVBQUUsQ0FBQztRQUM5QixLQUFLLE1BQU0sS0FBSyxJQUFJLFVBQVUsRUFBRTtZQUM5QixJQUFJLEtBQUssSUFBSSxJQUFJLElBQUksS0FBSyxDQUFDLENBQUMsQ0FBQyxLQUFLLElBQUksRUFBRTtnQkFDdEMsVUFBVSxDQUFDLElBQUksQ0FBQyxLQUFLLENBQUMsQ0FBQyxDQUFDLENBQUMsQ0FBQzthQUMzQjtTQUNGO1FBQ0QsVUFBVSxHQUFHLGFBQWEsQ0FBQyxNQUFNLENBQUMsVUFBVSxDQUFDLENBQUM7UUFDOUMsSUFBSSxVQUFVLENBQUMsTUFBTSxHQUFHLENBQUMsRUFBRTtZQUN6QixNQUFNLElBQUksVUFBVSxDQUNoQixvREFBb0Q7Z0JBQ3BELDRCQUE0QixJQUFJLENBQUMsU0FBUyxDQUFDLFVBQVUsQ0FBQyxHQUFHLENBQUMsQ0FBQztTQUNoRTtRQUVELElBQUksV0FBVyxHQUNYLFVBQVUsQ0FBQyxDQUFDLENBQUMsSUFBSSxJQUFJLENBQUMsQ0FBQyxDQUFDLElBQUksQ0FBQyxDQUFDLENBQUMsVUFBVSxDQUFDLENBQUMsQ0FBQyxDQUFDLEtBQUssQ0FBQyxDQUFDLENBQUMsQ0FBQztRQUMxRCxLQUFLLElBQUksQ0FBQyxHQUFHLENBQUMsRUFBRSxDQUFDLEdBQUcsVUFBVSxDQUFDLE1BQU0sRUFBRSxFQUFFLENBQUMsRUFBRTtZQUMxQyxNQUFNLEtBQUssR0FBRyxVQUFVLE
NBQUMsQ0FBQyxDQUFDLElBQUksSUFBSSxDQUFDLENBQUMsQ0FBQyxJQUFJLENBQUMsQ0FBQyxDQUFDLFVBQVUsQ0FBQyxDQUFDLENBQUMsQ0FBQyxLQUFLLENBQUMsQ0FBQyxDQUFDLENBQUM7WUFDcEUsV0FBVyxHQUFHLElBQUksQ0FBQywrQkFBK0IsQ0FBQyxXQUFXLEVBQUUsS0FBSyxDQUFDLENBQUM7U0FDeEU7UUFDRCwyRUFBMkU7UUFDM0UsaUJBQWlCO1FBQ2pCLE1BQU0sUUFBUSxHQUFHLFVBQVUsQ0FBQyxHQUFHLENBQUMsS0FBSyxDQUFDLEVBQUUsQ0FBQyxLQUFLLENBQUMsTUFBTSxDQUFDLENBQUM7UUFDdkQsSUFBSSxVQUFVLENBQUMsT0FBTyxDQUFDLElBQUksQ0FBQyxLQUFLLENBQUMsQ0FBQztZQUMvQixhQUFhLENBQUMsTUFBTSxDQUFDLFFBQVEsQ0FBQyxDQUFDLE1BQU0sS0FBSyxDQUFDLEVBQUU7WUFDL0MsSUFBSSxDQUFDLGVBQWUsR0FBRyxLQUFLLENBQUM7U0FDOUI7YUFBTTtZQUNMLElBQUksQ0FBQyxlQUFlLEdBQUcsSUFBSSxDQUFDO1NBQzdCO0lBQ0gsQ0FBQztJQUVRLElBQUksQ0FBQyxNQUF1QixFQUFFLE1BQWM7UUFDbkQsT0FBTyxJQUFJLENBQUMsR0FBRyxFQUFFO1lBQ2YsTUFBTSxHQUFHLE1BQWtCLENBQUM7WUFDNUIsSUFBSSxJQUFJLENBQUMsZUFBZSxFQUFFO2dCQUN4QixNQUFNLGNBQWMsR0FBYSxFQUFFLENBQUM7Z0JBQ3BDLE1BQU0sU0FBUyxHQUFHLE1BQU0sQ0FBQyxHQUFHLENBQUMsS0FBSyxDQUFDLEVBQUUsQ0FBQyxLQUFLLENBQUMsSUFBSSxDQUFDLENBQUM7Z0JBQ2xELElBQUksU0FBUyxDQUFDLE9BQU8sQ0FBQyxJQUFJLENBQUMsS0FBSyxDQUFDLENBQUMsRUFBRTtvQkFDbEMsc0VBQXNFO29CQUN0RSxrREFBa0Q7b0JBQ2xELE1BQU0sT0FBTyxHQUFHLFNBQVMsQ0FBQyxHQUFHLENBQUMsU0FBUyxDQUFDLENBQUM7b0JBQ3pDLEtBQUssSUFBSSxDQUFDLElBQUksTUFBTSxFQUFFO3dCQUNwQixNQUFNLEtBQUssR0FBRyxDQUFDLENBQUMsSUFBSSxDQUFDO3dCQUNyQixLQUFLLElBQUksQ0FBQyxHQUFHLENBQUMsRUFBRSxDQUFDLEdBQUcsT0FBTyxHQUFHLEtBQUssRUFBRSxFQUFFLENBQUMsRUFBRTs0QkFDeEMsQ0FBQyxHQUFHLENBQUMsQ0FBQyxVQUFVLENBQUMsQ0FBQyxFQUFFLENBQUMsQ0FBQyxDQUFDO3lCQUN4Qjt3QkFDRCxjQUFjLENBQUMsSUFBSSxDQUFDLENBQUMsQ0FBQyxDQUFDO3FCQUN4QjtvQkFDRCxPQUFPLElBQUksQ0FBQyxhQUFhLENBQUMsY0FBYyxDQUFDLENBQUM7aUJBQzNDO3FCQUFNO29CQUNMLGlFQUFpRTtvQkFDakUsK0RBQStEO29CQUMvRCxJQUFJLFVBQVUsR0FBRyxLQUFLLENBQUM7b0JBQ3ZCLEtBQUssTUFBTSxDQUFDLElBQUksTUFBTSxFQUFFO3dCQUN0QixNQUFNLEtBQUssR0FBRyxDQUFDLENBQUMsSUFBSSxDQUFDO3dCQUNyQixJQUFJLEtBQUssSUFBSSxJQUFJLEVBQUU7NEJBQ2pCLE1BQU0sTUFBTSxHQUFHLENBQUMsQ0FBQyxLQUFLLENBQUM7NEJBQ3ZCLE1BQU0sU0FBUyxHQUFHLE1BQU0sQ0FBQyxDQUFDLENBQUMsQ0FBQzs0QkFDNUIsTUFBTSxRQU
FRLEdBQUcsTUFBTSxDQUFDLEtBQUssQ0FBQyxDQUFDLENBQUMsQ0FBQyxNQUFNLENBQUMsQ0FBQyxTQUFTLENBQUMsQ0FBQyxDQUFDOzRCQUNyRCxJQUFJLFdBQVcsR0FBRyxHQUFHLENBQUMsT0FBTyxDQUN6QixDQUFDLEVBQUUsQ0FBQyxTQUFTLENBQUMsQ0FBQyxNQUFNLENBQUMsU0FBUyxDQUFDLFNBQVMsQ0FBQyxNQUFNLENBQUMsS0FBSyxDQUFDLENBQUMsQ0FBQyxDQUFDLENBQUMsQ0FBQyxDQUFDOzRCQUNqRSxXQUFXLEdBQUcsR0FBRyxDQUFDLFNBQVMsQ0FBQyxXQUFXLEVBQUUsQ0FBQyxDQUFDLEVBQUUsQ0FBQyxDQUFDLENBQUMsQ0FBQzs0QkFDakQsV0FBVyxHQUFHLEdBQUcsQ0FBQyxPQUFPLENBQUMsV0FBVyxFQUFFLFFBQVEsQ0FBQyxDQUFDOzRCQUNqRCxjQUFjLENBQUMsSUFBSSxDQUFDLFdBQVcsQ0FBQyxDQUFDOzRCQUNqQyxVQUFVLEdBQUcsSUFBSSxDQUFDO3lCQUNuQjs2QkFBTSxJQUFJLEtBQUssR0FBRyxDQUFDLEVBQUU7NEJBQ3BCLE1BQU0sSUFBSSxHQUFHLFNBQVMsQ0FBQyxLQUFLLENBQUMsQ0FBQyxFQUFFLEtBQUssQ0FBQyxDQUFDLE1BQU0sQ0FBQyxDQUFDLENBQUMsQ0FBQyxDQUFDLENBQUM7NEJBQ25ELGNBQWMsQ0FBQyxJQUFJLENBQUMsR0FBRyxDQUFDLFNBQVMsQ0FBQyxDQUFDLEVBQUUsSUFBSSxDQUFDLENBQUMsQ0FBQzs0QkFDNUMsVUFBVSxHQUFHLElBQUksQ0FBQzt5QkFDbkI7NkJBQU07NEJBQ0wsK0RBQStEOzRCQUMvRCxjQUFjLENBQUMsSUFBSSxDQUFDLENBQUMsQ0FBQyxDQUFDO3lCQUN4QjtxQkFDRjtvQkFDRCxJQUFJLENBQUMsR0FBRyxJQUFJLENBQUMsYUFBYSxDQUFDLGNBQWMsQ0FBQyxDQUFDO29CQUMzQyxNQUFNLEtBQUssR0FBRyxDQUFDLENBQUMsSUFBSSxDQUFDO29CQUNyQixJQUFJLFVBQVUsRUFBRTt3QkFDZCxrRUFBa0U7d0JBQ2xFLE9BQU87d0JBQ1AsSUFBSSxLQUFLLElBQUksSUFBSSxFQUFFOzRCQUNqQixNQUFNLE1BQU0sR0FBRyxDQUFDLENBQUMsS0FBSyxDQUFDOzRCQUN2QixNQUFNLEtBQUssR0FBRyxNQUFNLENBQUMsTUFBTSxDQUFDOzRCQUM1QixNQUFNLFNBQVMsR0FBRyxNQUFNLENBQUMsS0FBSyxHQUFHLENBQUMsQ0FBQyxDQUFDOzRCQUNwQyxNQUFNLFFBQVEsR0FDVixDQUFDLFNBQVMsQ0FBQyxDQUFDLE1BQU0sQ0FBQyxNQUFNLENBQUMsS0FBSyxDQUFDLENBQUMsRUFBRSxNQUFNLENBQUMsTUFBTSxHQUFHLENBQUMsQ0FBQyxDQUFDLENBQUM7NEJBQzNELENBQUMsR0FBRyxHQUFHLENBQUMsT0FBTyxDQUNYLEdBQUcsQ0FBQyxTQUFTLENBQUMsR0FBRyxDQUFDLE9BQU8sQ0FBQyxDQUFDLEVBQUUsQ0FBQyxDQUFDLENBQUMsRUFBRSxTQUFTLENBQUMsQ0FBQyxFQUFFLENBQUMsQ0FBQyxFQUFFLENBQUMsQ0FBQyxDQUFDLEVBQ3RELFFBQVEsQ0FBQyxDQUFDO3lCQUNmOzZCQUFNLElBQUksS0FBSyxHQUFHLENBQUMsRUFBRTs0QkFDcEIsTUFBTSxJQUFJLEdBQUcsQ0FBQyxLQUFLLEdBQUcsQ0FBQyxDQUFDLENBQUMsTUFBTSxDQUFDLFNBQVMsQ0FBQyxLQUFLLE
NBQUMsQ0FBQyxFQUFFLEtBQUssR0FBRyxDQUFDLENBQUMsQ0FBQyxDQUFDOzRCQUMvRCxDQUFDLEdBQUcsR0FBRyxDQUFDLFNBQVMsQ0FBQyxDQUFDLEVBQUUsSUFBSSxDQUFDLENBQUM7eUJBQzVCO3FCQUNGO29CQUNELE9BQU8sQ0FBQyxDQUFDO2lCQUNWO2FBQ0Y7aUJBQU07Z0JBQ0wsT0FBTyxJQUFJLENBQUMsYUFBYSxDQUFDLE1BQU0sQ0FBQyxDQUFDO2FBQ25DO1FBQ0gsQ0FBQyxDQUFDLENBQUM7SUFDTCxDQUFDO0lBRVEsa0JBQWtCLENBQUMsVUFBeUI7UUFDbkQsVUFBVSxHQUFHLFVBQXFCLENBQUM7UUFDbkMsSUFBSSxXQUFrQixDQUFDO1FBQ3ZCLElBQUksVUFBVSxDQUFDLENBQUMsQ0FBQyxJQUFJLElBQUksRUFBRTtZQUN6QixXQUFXLEdBQUcsSUFBSSxDQUFDO1NBQ3BCO2FBQU07WUFDTCxXQUFXLEdBQUcsVUFBVSxDQUFDLENBQUMsQ0FBQyxDQUFDLEtBQUssQ0FBQyxDQUFDLENBQUMsQ0FBQztTQUN0QztRQUNELEtBQUssSUFBSSxDQUFDLEdBQUcsQ0FBQyxFQUFFLENBQUMsR0FBRyxVQUFVLENBQUMsTUFBTSxFQUFFLEVBQUUsQ0FBQyxFQUFFO1lBQzFDLE1BQU0sS0FBSyxHQUFHLFVBQVUsQ0FBQyxDQUFDLENBQUMsSUFBSSxJQUFJLENBQUMsQ0FBQyxDQUFDLElBQUksQ0FBQyxDQUFDLENBQUMsVUFBVSxDQUFDLENBQUMsQ0FBQyxDQUFDLEtBQUssQ0FBQyxDQUFDLENBQUMsQ0FBQztZQUNwRSxXQUFXLEdBQUcsSUFBSSxDQUFDLCtCQUErQixDQUFDLFdBQVcsRUFBRSxLQUFLLENBQUMsQ0FBQztTQUN4RTtRQUVELElBQUksVUFBVSxHQUFhLEVBQUUsQ0FBQztRQUM5QixLQUFLLE1BQU0sS0FBSyxJQUFJLFVBQVUsRUFBRTtZQUM5QixJQUFJLEtBQUssSUFBSSxJQUFJLElBQUksS0FBSyxDQUFDLENBQUMsQ0FBQyxLQUFLLElBQUksRUFBRTtnQkFDdEMsVUFBVSxDQUFDLElBQUksQ0FBQyxLQUFLLENBQUMsQ0FBQyxDQUFDLENBQUMsQ0FBQzthQUMzQjtTQUNGO1FBQ0QsVUFBVSxHQUFHLGFBQWEsQ0FBQyxNQUFNLENBQUMsVUFBVSxDQUFDLENBQUM7UUFDOUMsSUFBSSxVQUFVLENBQUMsTUFBTSxLQUFLLENBQUMsRUFBRTtZQUMzQixXQUFXLEdBQUcsVUFBVSxDQUFDLE1BQU0sQ0FBQyxXQUFXLENBQUMsQ0FBQztTQUM5QzthQUFNO1lBQ0wsV0FBVyxHQUFHLENBQUMsSUFBSSxDQUFDLENBQUMsTUFBTSxDQUFDLFdBQVcsQ0FBQyxDQUFDO1NBQzFDO1FBQ0QsT0FBTyxXQUFXLENBQUM7SUFDckIsQ0FBQztJQUVRLFdBQVcsQ0FBQyxNQUF1QixFQUFFLElBQXNCO1FBRWxFLE9BQU8sR0FBRyxDQUFDLElBQUksQ0FBQyxHQUFHLEVBQUU7WUFDbkIsSUFBSSxJQUFJLElBQUksSUFBSSxFQUFFO2dCQUNoQixPQUFPLElBQUksQ0FBQzthQUNiO1lBQ0QsSUFBSSxDQUFDLEtBQUssQ0FBQyxPQUFPLENBQUMsSUFBSSxDQUFDLEVBQUU7Z0JBQ3hCLE1BQU0sSUFBSSxVQUFVLENBQUMsMkJBQTJCLENBQUMsQ0FBQzthQUNuRDtZQUNELElBQUksQ0FBQyxLQUFLLENBQUMsT0FBTyxDQUFDLE1BQU0sQ0FBQyxFQUFFO2dCQUMxQixNQUFNLElBQUksVU
FBVSxDQUFDLDZCQUE2QixDQUFDLENBQUM7YUFDckQ7WUFDRCxJQUFJLElBQUksQ0FBQyxNQUFNLEtBQUssTUFBTSxDQUFDLE1BQU0sRUFBRTtnQkFDakMsTUFBTSxJQUFJLFVBQVUsQ0FDaEIsOERBQThEO29CQUM5RCxxQ0FBcUM7b0JBQ3JDLElBQUksTUFBTSxDQUFDLE1BQU0sT0FBTyxJQUFJLENBQUMsTUFBTSxHQUFHLENBQUMsQ0FBQzthQUM3QztZQUNELElBQUksSUFBSSxDQUFDLEtBQUssQ0FBQyxDQUFDLENBQUMsRUFBRSxDQUFDLENBQUMsSUFBSSxJQUFJLENBQUMsRUFBRTtnQkFDOUIsT0FBTyxJQUFJLENBQUM7YUFDYjtZQUNELElBQUksR0FBRyxJQUFJLENBQUMsR0FBRyxDQUFDLENBQUMsQ0FBQyxFQUFFLENBQUMsQ0FBQyxJQUFJLElBQUksQ0FBQyxDQUFDLENBQUMsQ0FBQyxDQUFDLENBQUMsQ0FBQyxHQUFHLENBQUMsVUFBVSxDQUFDLENBQUMsRUFBRSxDQUFDLENBQUMsQ0FBQyxDQUFDO1lBQzNELElBQUksTUFBTSxHQUFHLElBQUksQ0FBQyxDQUFDLENBQUMsQ0FBQztZQUNyQixLQUFLLElBQUksQ0FBQyxHQUFHLENBQUMsRUFBRSxDQUFDLEdBQUcsSUFBSSxDQUFDLE1BQU0sR0FBRyxDQUFDLEVBQUUsRUFBRSxDQUFDLEVBQUU7Z0JBQ3hDLE1BQU0sR0FBRyxHQUFHLENBQUMsVUFBVSxDQUFDLE1BQU0sRUFBRSxJQUFJLENBQUMsQ0FBQyxDQUFDLENBQUMsQ0FBQzthQUMxQztZQUNELE9BQU8sTUFBTSxDQUFDO1FBQ2hCLENBQUMsQ0FBQyxDQUFDO0lBQ0wsQ0FBQztDQUNGO0FBRUQsTUFBYSxHQUFJLFNBQVEsS0FBSztJQUc1QixZQUFZLElBQWdCO1FBQzFCLEtBQUssQ0FBQyxJQUFJLENBQUMsQ0FBQztJQUNkLENBQUM7SUFFa0IsYUFBYSxDQUFDLE1BQWdCO1FBQy9DLE9BQU8sSUFBSSxDQUFDLEdBQUcsRUFBRTtZQUNmLElBQUksTUFBTSxHQUFHLE1BQU0sQ0FBQyxDQUFDLENBQUMsQ0FBQyxLQUFLLEVBQUUsQ0FBQztZQUMvQixLQUFLLElBQUksQ0FBQyxHQUFHLENBQUMsRUFBRSxDQUFDLEdBQUcsTUFBTSxDQUFDLE1BQU0sRUFBRSxFQUFFLENBQUMsRUFBRTtnQkFDdEMsTUFBTSxHQUFHLEdBQUcsQ0FBQyxHQUFHLENBQUMsTUFBTSxFQUFFLE1BQU0sQ0FBQyxDQUFDLENBQUMsQ0FBQyxDQUFDO2FBQ3JDO1lBQ0QsT0FBTyxNQUFNLENBQUM7UUFDaEIsQ0FBQyxDQUFDLENBQUM7SUFDTCxDQUFDOztBQWRELGtCQUFrQjtBQUNYLGFBQVMsR0FBRyxLQUFLLENBQUM7U0FGZCxHQUFHO0FBaUJoQixhQUFhLENBQUMsYUFBYSxDQUFDLEdBQUcsQ0FBQyxDQUFDO0FBRWpDOzs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7R0E2Q0c7QUFDSCxNQUFNLFVBQVUsR0FBRyxDQUFDLE1BQTRDO0lBRTlELElBQUksS0FBSyxDQUFDLE9BQU8sQ0FBQyxNQUFNLENBQUMsRUFBRTtRQUN6QixNQUFNLEtBQUssR0FBRyxJQUFJLEdBQUcsQ0FBQyxFQUFFLENBQUMsQ0FBQztRQUMxQixPQUFPLEtBQUssQ0FBQyxLQUFLLENBQUMsTUFBTSxDQUE0QixDQUFDO0tBQ3ZEO1NBQU07UUFDTCxPQUFPLElBQUksR0FBRyxDQUFDLE
1BQU0sQ0FBQyxDQUFDO0tBQ3hCO0FBQ0gsQ0FBQztBQUVELE1BQWEsUUFBUyxTQUFRLEtBQUs7SUFHakMsWUFBWSxJQUFnQjtRQUMxQixLQUFLLENBQUMsSUFBSSxDQUFDLENBQUM7SUFDZCxDQUFDO0lBRWtCLGFBQWEsQ0FBQyxNQUFnQjtRQUMvQyxPQUFPLElBQUksQ0FBQyxHQUFHLEVBQUU7WUFDZixJQUFJLE1BQU0sR0FBRyxNQUFNLENBQUMsQ0FBQyxDQUFDLENBQUMsS0FBSyxFQUFFLENBQUM7WUFDL0IsS0FBSyxJQUFJLENBQUMsR0FBRyxDQUFDLEVBQUUsQ0FBQyxHQUFHLE1BQU0sQ0FBQyxNQUFNLEVBQUUsRUFBRSxDQUFDLEVBQUU7Z0JBQ3RDLE1BQU0sR0FBRyxHQUFHLENBQUMsR0FBRyxDQUFDLE1BQU0sRUFBRSxNQUFNLENBQUMsQ0FBQyxDQUFDLENBQUMsQ0FBQzthQUNyQztZQUNELE9BQU8sTUFBTSxDQUFDO1FBQ2hCLENBQUMsQ0FBQyxDQUFDO0lBQ0wsQ0FBQzs7QUFkRCxrQkFBa0I7QUFDWCxrQkFBUyxHQUFHLFVBQVUsQ0FBQztTQUZuQixRQUFRO0FBaUJyQixhQUFhLENBQUMsYUFBYSxDQUFDLFFBQVEsQ0FBQyxDQUFDO0FBRXRDOzs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7R0E2Q0c7QUFDSCxNQUFNLFVBQVUsUUFBUSxDQUFDLE1BQTRDO0lBRW5FLElBQUksS0FBSyxDQUFDLE9BQU8sQ0FBQyxNQUFNLENBQUMsRUFBRTtRQUN6QixNQUFNLEtBQUssR0FBRyxJQUFJLFFBQVEsQ0FBQyxFQUFFLENBQUMsQ0FBQztRQUMvQixPQUFPLEtBQUssQ0FBQyxLQUFLLENBQUMsTUFBTSxDQUE0QixDQUFDO0tBQ3ZEO1NBQU07UUFDTCxPQUFPLElBQUksUUFBUSxDQUFDLE1BQU0sQ0FBQyxDQUFDO0tBQzdCO0FBQ0gsQ0FBQztBQUVELE1BQWEsT0FBUSxTQUFRLEtBQUs7SUFHaEMsWUFBWSxJQUFnQjtRQUMxQixLQUFLLENBQUMsSUFBSSxDQUFDLENBQUM7SUFDZCxDQUFDO0lBRWtCLGFBQWEsQ0FBQyxNQUFnQjtRQUMvQyxPQUFPLElBQUksQ0FBQyxHQUFHLEVBQUU7WUFDZixJQUFJLE1BQU0sR0FBRyxNQUFNLENBQUMsQ0FBQyxDQUFDLENBQUMsS0FBSyxFQUFFLENBQUM7WUFDL0IsS0FBSyxJQUFJLENBQUMsR0FBRyxDQUFDLEVBQUUsQ0FBQyxHQUFHLE1BQU0sQ0FBQyxNQUFNLEVBQUUsRUFBRSxDQUFDLEVBQUU7Z0JBQ3RDLE1BQU0sR0FBRyxHQUFHLENBQUMsR0FBRyxDQUFDLE1BQU0sRUFBRSxNQUFNLENBQUMsQ0FBQyxDQUFDLENBQUMsQ0FBQzthQUNyQztZQUNELE9BQU8sR0FBRyxDQUFDLEdBQUcsQ0FBQyxDQUFDLEdBQUcsTUFBTSxDQUFDLE1BQU0sRUFBRSxNQUFNLENBQUMsQ0FBQztRQUM1QyxDQUFDLENBQUMsQ0FBQztJQUNMLENBQUM7O0FBZEQsa0JBQWtCO0FBQ1gsaUJBQVMsR0FBRyxTQUFTLENBQUM7U0FGbEIsT0FBTztBQWlCcEIsYUFBYSxDQUFDLGFBQWEsQ0FBQyxPQUFPLENBQUMsQ0FBQztBQUVyQzs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7OztHQThDRztBQUNILE1BQU0sVUFBVSxPQUFPLENBQUMsTUFBNEM7SUFFbEUsSUFBSSxLQUFLLENBQUMsT0
FBTyxDQUFDLE1BQU0sQ0FBQyxFQUFFO1FBQ3pCLE1BQU0sS0FBSyxHQUFHLElBQUksT0FBTyxDQUFDLEVBQUUsQ0FBQyxDQUFDO1FBQzlCLE9BQU8sS0FBSyxDQUFDLEtBQUssQ0FBQyxNQUFNLENBQTRCLENBQUM7S0FDdkQ7U0FBTTtRQUNMLE9BQU8sSUFBSSxPQUFPLENBQUMsTUFBTSxDQUFDLENBQUM7S0FDNUI7QUFDSCxDQUFDO0FBRUQsTUFBYSxPQUFRLFNBQVEsS0FBSztJQUdoQyxZQUFZLElBQWdCO1FBQzFCLEtBQUssQ0FBQyxJQUFJLENBQUMsQ0FBQztJQUNkLENBQUM7SUFFa0IsYUFBYSxDQUFDLE1BQWdCO1FBQy9DLE9BQU8sSUFBSSxDQUFDLEdBQUcsRUFBRTtZQUNmLElBQUksTUFBTSxHQUFHLE1BQU0sQ0FBQyxDQUFDLENBQUMsQ0FBQztZQUN2QixLQUFLLElBQUksQ0FBQyxHQUFHLENBQUMsRUFBRSxDQUFDLEdBQUcsTUFBTSxDQUFDLE1BQU0sRUFBRSxFQUFFLENBQUMsRUFBRTtnQkFDdEMsTUFBTSxHQUFHLEdBQUcsQ0FBQyxPQUFPLENBQUMsTUFBTSxFQUFFLE1BQU0sQ0FBQyxDQUFDLENBQUMsQ0FBQyxDQUFDO2FBQ3pDO1lBQ0QsT0FBTyxNQUFNLENBQUM7UUFDaEIsQ0FBQyxDQUFDLE