// federer: Experiments in asynchronous federated learning and decentralized learning
// Evaluator.js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Evaluator = void 0;
const common_1 = require("../../common");
const PrioritizedLockedTaskRunner_1 = require("../PrioritizedLockedTaskRunner");
/** Evaluates the model against a fixed test set as round summaries arrive from the server. */
class Evaluator {
constructor(model, testSet, options) {
this.model = model;
this.testSet = testSet;
this.options = options;
/** Task runner for evaluation tasks. */
this.evaluationTaskRunner = new PrioritizedLockedTaskRunner_1.PrioritizedLockedTaskRunner();
}
/**
* Evaluates the model weights from a round summary sent by the server to
* the coordinator. The promise is rejected if a higher-priority evaluation
* request arrives before this one starts running.
*
* @param summary Round summary, sent by the server
* @returns A promise that resolves to the evaluation results if evaluation
* was run, or that rejects if a newer round summary was received before we
* started evaluating `summary`
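*
* @example
* // Hypothetical usage sketch; `evaluator`, `summary`, and `plot` are
* // illustrative names, not part of this module:
* evaluator.evaluate(summary)
*     .then(({ loss, accuracy }) => plot(summary.roundNumber, loss, accuracy))
*     .catch(() => { }); // superseded by a newer summary; safe to ignore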
*/
evaluate(summary) {
// Two problems can occur when running the evaluation:
//
// 1. We receive round summaries faster than we can evaluate them
// 2. We receive round summaries out of order, which leads to graphs where
// points aren't ordered according to the x-axis (i.e. the graph goes
// "backwards" sometimes).
//
// To solve these problems, we use a locked, prioritized task runner. This
// solves the above problems as follows:
//
// 1. The lock prevents two promises from evaluating the model at once,
//    so the model weights are never mutated concurrently.
// 2. Using the round number as the task priority invalidates any
//    registered tasks with lower priority that haven't run yet. This
//    ensures we always evaluate the results from the latest received
//    round.
//
// There is still a risk that we could get updates so fast that they always
// get invalidated by a newer task before we start running them. But in
// practice updates aren't coming in *that* fast.
//
// Note that this also means that when updates come in faster than we can
// evaluate them, we will "skip" some rounds. This is acceptable for now,
// but we could revisit this and run a full evaluation of all round
// summaries after training has completed. (A minimal sketch of the
// task-runner contract assumed here appears at the end of this file.)
return this.evaluationTaskRunner.run(summary.roundNumber, () => {
this.setModelWeights(summary.weights);
return common_1.assertNoLeakingTensors("evaluate", () => common_1.tidy(() => {
const x = this.testSet.items;
const y = this.testSet.labels;
const rawResults = this.model.evaluate(x, y, {
verbose: this.options.tensorflowVerbosity ?? 0,
});
return this.parseEvaluationResults(rawResults, summary);
}));
});
}
/**
 * Deserializes the received weights, copies them into the model, and
 * disposes the intermediate tensors to avoid leaking memory.
 */
setModelWeights(serializedWeights) {
const weights = common_1.Weights.deserialize(serializedWeights);
const variables = weights.toVariables();
this.model.setWeights(variables);
weights.dispose();
variables.forEach((v) => v.dispose());
}
/**
 * Converts the raw output of `model.evaluate`, expected to be a
 * `[loss, accuracy]` pair of scalar tensors, into plain numbers tagged
 * with the originating round summary.
 */
parseEvaluationResults(result, summary) {
if (!Array.isArray(result)) {
throw new Error("Expected evaluation to return an array of scalars, got a single scalar");
}
if (result.length !== 2) {
throw new Error(`Expected evaluation to return an array with 2 scalars, got an array with ${result.length} scalars`);
}
const [testLoss, testAccuracy] = result;
return {
loss: testLoss.arraySync(),
accuracy: testAccuracy.arraySync(),
round: summary,
};
}
}
exports.Evaluator = Evaluator;
//# sourceMappingURL=Evaluator.js.map
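// A minimal sketch of the task-runner contract assumed above (hypothetical;
// not the actual PrioritizedLockedTaskRunner implementation). It illustrates
// the two properties `evaluate` relies on: tasks run one at a time, and a
// task that hasn't started yet rejects once a higher-priority task is
// registered.
class PrioritizedLockedTaskRunnerSketch {
    constructor() {
        // Serializes task execution; this promise chain acts as the "lock".
        this.queue = Promise.resolve();
        // Highest priority registered so far; newer rounds raise it.
        this.highestPriority = -Infinity;
    }
    run(priority, task) {
        this.highestPriority = Math.max(this.highestPriority, priority);
        const result = this.queue.then(() => {
            // A higher-priority task was registered before this one started,
            // so this task is stale and must not run.
            if (priority < this.highestPriority) {
                throw new Error(`Task with priority ${priority} was superseded`);
            }
            return task();
        });
        // Keep the chain alive even when a task rejects or is superseded.
        this.queue = result.catch(() => { });
        return result;
    }
}
// Under this contract, evaluating with `priority = summary.roundNumber` means
// stale rounds reject instead of overwriting newer model weights.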