/*!
* chain.js - blockchain management for hsd
* Copyright (c) 2017-2018, Christopher Jeffrey (MIT License).
* https://github.com/handshake-org/hsd
*/
'use strict';
const assert = require('bsert');
const path = require('path');
const AsyncEmitter = require('bevent');
const Logger = require('blgr');
const {Lock} = require('bmutex');
const LRU = require('blru');
const {BufferMap, BufferSet} = require('buffer-map');
const Network = require('../protocol/network');
const ChainDB = require('./chaindb');
const common = require('./common');
const consensus = require('../protocol/consensus');
const rules = require('../covenants/rules');
const NameState = require('../covenants/namestate');
const util = require('../utils/util');
const ChainEntry = require('./chainentry');
const CoinView = require('../coins/coinview');
const Script = require('../script/script');
const {VerifyError} = require('../protocol/errors');
const {OwnershipProof} = require('../covenants/ownership');
const AirdropProof = require('../primitives/airdropproof');
const {CriticalError} = require('../errors');
const thresholdStates = common.thresholdStates;
const scanActions = common.scanActions;
const {states} = NameState;
const {
VERIFY_COVENANTS_HARDENED,
VERIFY_COVENANTS_LOCKUP
} = rules.nameFlags;
/** @typedef {import('../types').Hash} Hash */
/** @typedef {import('../types').LockFlags} LockFlags */
/** @typedef {import('@handshake-org/bfilter').BloomFilter} BloomFilter */
/** @typedef {import('../primitives/block')} Block */
/** @typedef {import('../primitives/tx')} TX */
/** @typedef {import('../primitives/txmeta')} TXMeta */
/** @typedef {import('../primitives/outpoint')} Outpoint */
/** @typedef {import('../primitives/coin')} Coin */
/** @typedef {import('../primitives/address')} Address */
/** @typedef {import('../coins/coinentry')} CoinEntry */
/**
* Blockchain
* @alias module:blockchain.Chain
* @property {ChainDB} db
* @property {ChainEntry?} tip
* @property {Number} height
* @property {DeploymentState} state
*/
class Chain extends AsyncEmitter {
/**
* Create a blockchain.
* @constructor
* @param {Object} options
*/
constructor(options) {
super();
this.opened = false;
this.options = new ChainOptions(options);
this.network = this.options.network;
this.logger = this.options.logger.context('chain');
this.workers = this.options.workers;
this.db = new ChainDB(this.options);
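// Named lock used to serialize block operations and an LRU
// cache of known-invalid block hashes.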
this.locker = new Lock(true, BufferMap);
this.invalid = new LRU(5000, null, BufferMap);
this.state = new DeploymentState(this.network.genesis.hash);
this.tip = new ChainEntry();
this.height = -1;
this.synced = false;
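// Orphan blocks indexed by their own hash and by their prevBlock hash.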
this.orphanMap = new BufferMap();
this.orphanPrev = new BufferMap();
}
/**
* Open the chain, wait for the database to load.
* @returns {Promise<void>}
*/
async open() {
assert(!this.opened, 'Chain is already open.');
this.opened = true;
this.logger.info('Chain is loading.');
if (this.options.checkpoints)
this.logger.info('Checkpoints are enabled.');
await this.db.open();
const tip = await this.db.getTip();
assert(tip);
this.tip = tip;
this.height = tip.height;
this.logger.info('Chain Height: %d', tip.height);
this.logger.memory();
const state = await this.getDeploymentState();
this.setDeploymentState(state);
if (!this.options.spv) {
const sync = await this.tryCompact();
if (sync)
await this.syncTree();
}
this.logger.memory();
this.emit('tip', tip);
this.maybeSync();
}
/**
* Close the chain, wait for the database to close.
* @returns {Promise<void>}
*/
async close() {
assert(this.opened, 'Chain is not open.');
this.opened = false;
return this.db.close();
}
/**
* Get compaction heights.
* @returns {Promise<Object>}
*/
async getCompactionHeights() {
if (this.options.spv)
return null;
const {keepBlocks} = this.network.block;
const {compactionHeight} = await this.db.getTreeState();
const {compactTreeInitInterval} = this.options;
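// Height at which a restart will trigger the next tree compaction.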
const compactFrom = compactionHeight + keepBlocks + compactTreeInitInterval;
return {
compactionHeight,
compactFrom
};
}
/**
* Check if we need to compact tree data.
* @returns {Promise<Boolean>} - Should we sync
*/
async tryCompact() {
if (this.options.spv)
return false;
if (!this.options.compactTreeOnInit)
return true;
const {txStart} = this.network;
const {keepBlocks} = this.network.block;
const startFrom = txStart + keepBlocks;
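// Nothing old enough to compact yet.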
if (this.height <= startFrom)
return true;
const {compactFrom} = await this.getCompactionHeights();
if (compactFrom > this.height) {
this.logger.debug(
`Tree will compact when restarted after height ${compactFrom}.`);
return true;
}
// Compact tree calls syncTree so we don't want to rerun it.
await this.compactTree();
return false;
}
/**
* Sync tree state.
*/
async syncTree() {
this.logger.info('Synchronizing Tree with block history...');
// Current state of the tree, loaded from chain database and
// injected in chainDB.open(). It should be in the most
// recently-committed state, which should have been at the last
// tree interval. We might also need to recover from a
// failed compactTree() operation. Either way, there might have been
// new blocks added to the chain since then.
const currentRoot = this.db.treeRoot();
// We store commit height for the tree in the tree state.
// commitHeight is the height of the block that committed tree root.
// Note that the block at commitHeight has different tree root.
const treeState = await this.db.getTreeState();
const {commitHeight} = treeState;
// sanity check
if (commitHeight < this.height) {
const entry = await this.db.getEntryByHeight(commitHeight + 1);
assert(entry.treeRoot.equals(treeState.treeRoot));
assert(entry.treeRoot.equals(currentRoot));
}
// Replay all blocks since the last tree interval to rebuild
// the `txn` which is the in-memory delta between tree interval commitments.
for (let height = commitHeight + 1; height <= this.height; height++) {
const entry = await this.db.getEntryByHeight(height);
assert(entry);
const block = await this.db.getBlock(entry.hash);
assert(block);
const state = await this.readDeploymentState(entry);
assert(state);
const view = new CoinView();
for (const tx of block.txs)
await this.verifyCovenants(tx, view, height, state.nameFlags);
// If the chain replay crosses a tree interval, it will commit
// and write to disk in saveNames(), resetting the `txn` like usual.
await this.db.saveNames(view, entry, false);
}
this.logger.info('Synchronized Tree Root: %x.', this.db.txn.rootHash());
}
/**
* Perform all necessary contextual verification on a block.
* @private
* @param {Block} block
* @param {ChainEntry} prev
* @param {Number} flags
* @returns {Promise<Array>} - [CoinView, DeploymentState]
*/
async verifyContext(block, prev, flags) {
// Initial non-contextual verification.
const state = await this.verify(block, prev, flags);
// Skip everything if we're in SPV mode.
if (this.options.spv) {
const view = new CoinView();
return [view, state];
}
// Skip everything if we're using checkpoints.
if (this.isHistorical(prev)) {
const view = await this.updateInputs(block, prev, state);
return [view, state];
}
// Verify scripts, spend and add coins.
const view = await this.verifyInputs(block, prev, state);
return [view, state];
}
/**
* Perform all necessary contextual verification
* on a block, without POW check.
* @param {Block} block
* @returns {Promise<Array>} - [CoinView, DeploymentState]
*/
async verifyBlock(block) {
const unlock = await this.locker.lock();
try {
return await this._verifyBlock(block);
} finally {
unlock();
}
}
/**
* Perform all necessary contextual verification
* on a block, without POW check (no lock).
* @private
* @param {Block} block
* @returns {Promise<Array>} - [CoinView, DeploymentState]
*/
async _verifyBlock(block) {
const flags = common.DEFAULT_FLAGS & ~common.flags.VERIFY_POW;
return this.verifyContext(block, this.tip, flags);
}
/**
* Test whether the hash is in the main chain.
* @param {Hash} hash
* @returns {Promise<Boolean>}
*/
isMainHash(hash) {
return this.db.isMainHash(hash);
}
/**
* Test whether the entry is in the main chain.
* @param {ChainEntry} entry
* @returns {Promise<Boolean>}
*/
isMainChain(entry) {
return this.db.isMainChain(entry);
}
/**
* Get ancestor by `height`.
* @param {ChainEntry} entry
* @param {Number} height
* @returns {Promise<ChainEntry?>}
*/
getAncestor(entry, height) {
return this.db.getAncestor(entry, height);
}
/**
* Get previous entry.
* @param {ChainEntry} entry
* @returns {Promise<ChainEntry?>}
*/
getPrevious(entry) {
return this.db.getPrevious(entry);
}
/**
* Get previous cached entry.
* @param {ChainEntry} entry
* @returns {ChainEntry?}
*/
getPrevCache(entry) {
return this.db.getPrevCache(entry);
}
/**
* Get next entry.
* @param {ChainEntry} entry
* @returns {Promise<ChainEntry?>}
*/
getNext(entry) {
return this.db.getNext(entry);
}
/**
* Get next entry.
* @param {ChainEntry} entry
* @returns {Promise<ChainEntry?>}
*/
getNextEntry(entry) {
return this.db.getNextEntry(entry);
}
/**
* Calculate median time past.
* @param {ChainEntry} prev
* @returns {Promise<Number>}
*/
async getMedianTime(prev) {
const timespan = consensus.MEDIAN_TIMESPAN;
const median = [];
let entry = prev;
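// Walk back through up to MEDIAN_TIMESPAN ancestors, collecting timestamps.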
for (let i = 0; i < timespan && entry; i++) {
median.push(entry.time);
const cache = this.getPrevCache(entry);
if (cache)
entry = cache;
else
entry = await this.getPrevious(entry);
}
median.sort(cmp);
return median[median.length >>> 1];
}
/**
* Test whether the entry is potentially
* an ancestor of a checkpoint.
* @param {ChainEntry} prev
* @returns {Boolean}
*/
isHistorical(prev) {
if (this.options.checkpoints) {
if (prev.height + 1 <= this.network.lastCheckpoint)
return true;
}
return false;
}
/**
* Test whether the height is potentially
* an ancestor of a checkpoint.
* @param {Number} height
* @returns {Boolean}
*/
isHistoricalHeight(height) {
if (this.options.checkpoints) {
if (height <= this.network.lastCheckpoint)
return true;
}
return false;
}
/**
* Contextual verification for a block, including
* version deployments (IsSuperMajority), versionbits,
* coinbase height, finality checks.
* @private
* @param {Block} block
* @param {ChainEntry} prev
* @param {Number} flags
* @returns {Promise<DeploymentState>}
*/
async verify(block, prev, flags) {
assert(typeof flags === 'number');
// Extra sanity check.
if (!block.prevBlock.equals(prev.hash))
throw new VerifyError(block, 'invalid', 'bad-prevblk', 0);
// Verify a checkpoint if there is one.
const hash = block.hash();
if (!this.verifyCheckpoint(prev, hash)) {
throw new VerifyError(block,
'checkpoint',
'checkpoint mismatch',
100);
}
// Skip everything when using checkpoints.
// We can do this safely because every
// block in between each checkpoint was
// validated outside in the header chain.
if (this.isHistorical(prev)) {
// Check merkle root.
if (flags & common.flags.VERIFY_BODY) {
assert(typeof block.createMerkleRoot === 'function');
const root = block.createMerkleRoot();
if (!block.merkleRoot.equals(root)) {
throw new VerifyError(block,
'invalid',
'bad-txnmrklroot',
100,
true);
}
const witnessRoot = block.createWitnessRoot();
if (!block.witnessRoot.equals(witnessRoot)) {
throw new VerifyError(block,
'invalid',
'bad-witnessroot',
100,
true);
}
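// Body has been checked against the header roots,
// so skip checkBody() below.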
flags &= ~common.flags.VERIFY_BODY;
}
}
// Non-contextual checks.
if (flags & common.flags.VERIFY_BODY) {
const [valid, reason, score] = block.checkBody();
if (!valid)
throw new VerifyError(block, 'invalid', reason, score, true);
}
// Check name DoS limits.
const set = new BufferSet();
let opens = 0;
let updates = 0;
let renewals = 0;
for (let i = 0; i < block.txs.length; i++) {
const tx = block.txs[i];
opens += rules.countOpens(tx);
if (opens > consensus.MAX_BLOCK_OPENS) {
throw new VerifyError(block,
'invalid',
'bad-blk-opens',
100);
}
updates += rules.countUpdates(tx);
if (updates > consensus.MAX_BLOCK_UPDATES) {
throw new VerifyError(block,
'invalid',
'bad-blk-updates',
100);
}
renewals += rules.countRenewals(tx);
if (renewals > consensus.MAX_BLOCK_RENEWALS) {
throw new VerifyError(block,
'invalid',
'bad-blk-renewals',
100);
}
// Certain covenants can only be used once per name per block
if (rules.hasNames(tx, set)) {
throw new VerifyError(block,
'invalid',
'bad-blk-names',
100);
}
rules.addNames(tx, set);
}
// Ensure the POW is what we expect.
const bits = await this.getTarget(block.time, prev);
if (block.bits !== bits) {
this.logger.debug(
'Bad diffbits: 0x%s != 0x%s',
util.hex32(block.bits),
util.hex32(bits));
throw new VerifyError(block,
'invalid',
'bad-diffbits',
100);
}
// Ensure the timestamp is correct.
const mtp = await this.getMedianTime(prev);
if (block.time <= mtp) {
throw new VerifyError(block,
'invalid',
'time-too-old',
0);
}
// Check timestamp against adj-time+2hours.
// If this fails we may be able to accept
// the block later.
if (block.time > this.network.now() + 2 * 60 * 60) {
throw new VerifyError(block,
'invalid',
'time-too-new',
0,
true);
}
// Skip all blocks in spv mode once
// we've verified the network target.
if (this.options.spv)
return this.state;
// Calculate height of current block.
const height = prev.height + 1;
// Get the new deployment state.
const state = await this.getDeployments(block.time, prev);
// Transactions must be finalized with
// regards to nSequence and nLockTime.
for (let i = 1; i < block.txs.length; i++) {
const tx = block.txs[i];
if (!tx.isFinal(height, mtp)) {
throw new VerifyError(block,
'invalid',
'bad-txns-nonfinal',
10);
}
}
// Make sure the height contained
// in the coinbase is correct.
if (block.getCoinbaseHeight() !== height) {
throw new VerifyError(block,
'invalid',
'bad-cb-height',
100);
}
const cb = block.txs[0];
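// Every coinbase input after the first carries a claim or airdrop
// proof in its witness, paired with the output at the same index.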
for (let i = 1; i < cb.inputs.length; i++) {
const {witness} = cb.inputs[i];
if (witness.items.length !== 1) {
throw new VerifyError(block,
'invalid',
'bad-witness-size',
100);
}
if (i >= cb.outputs.length) {
throw new VerifyError(block,
'invalid',
'bad-output',
100);
}
const output = cb.outputs[i];
// Airdrop proof.
if (!output.covenant.isClaim()) {
// Disable airdrop claims if airstop is activated
if (state.hasAirstop) {
throw new VerifyError(block,
'invalid',
'bad-airdrop-disabled',
100);
}
let proof;
try {
proof = AirdropProof.decode(witness.items[0]);
} catch (e) {
throw new VerifyError(block,
'invalid',
'bad-airdrop-format',
100);
}
if (!proof.isSane()) {
throw new VerifyError(block,
'invalid',
'bad-airdrop-sanity',
100);
}
if (prev.height + 1 >= this.network.goosigStop) {
const key = proof.getKey();
if (!key) {
throw new VerifyError(block,
'invalid',
'bad-airdrop-proof',
100);
}
if (key.isGoo()) {
throw new VerifyError(block,
'invalid',
'bad-goosig-disabled',
100);
}
}
// Note: GooSig RSA 1024 is possible to
// crack as well, but in order to make
// it safe we would need to include a
// commitment to the key size (bad).
// We may have to just disallow <2048
// bit for mainnet.
if (state.hasHardening()) {
if (proof.isWeak()) {
throw new VerifyError(block,
'invalid',
'bad-airdrop-sanity',
10);
}
}
continue;
}
// DNSSEC ownership proof.
let proof;
try {
proof = OwnershipProof.decode(witness.items[0]);
} catch (e) {
throw new VerifyError(block,
'invalid',
'bad-dnssec-format',
100);
}
// Verify times.
if (!proof.verifyTimes(prev.time)) {
throw new VerifyError(block,
'invalid',
'bad-dnssec-times',
10);
}
}
return state;
}
/**
* Check all deployments on a chain.
* @param {Number} time
* @param {ChainEntry} prev
* @returns {Promise<DeploymentState>}
*/
async getDeployments(time, prev) {
const deployments = this.network.deployments;
const state = new DeploymentState(prev.hash);
// Disable RSA-1024.
if (await this.isActive(prev, deployments.hardening))
state.nameFlags |= rules.nameFlags.VERIFY_COVENANTS_HARDENED;
// Prevent ICANN, TOP100 and CUSTOM TLDs from being auctioned.
if (await this.isActive(prev, deployments.icannlockup))
state.nameFlags |= rules.nameFlags.VERIFY_COVENANTS_LOCKUP;
// Disable airdrop claims.
if (await this.isActive(prev, deployments.airstop))
state.hasAirstop = true;
return state;
}
/**
* Set a new deployment state.
* @param {DeploymentState} state
*/
setDeploymentState(state) {
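// Below the last checkpoint, swap the state without
// logging activation warnings.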
if (this.options.checkpoints && this.height < this.network.lastCheckpoint) {
this.state = state;
return;
}
if (!this.state.hasHardening() && state.hasHardening())
this.logger.warning('RSA hardening has been activated.');
if (this.height === this.network.deflationHeight)
this.logger.warning('Name claim deflation has been activated.');
if (!this.state.hasICANNLockup() && state.hasICANNLockup())
this.logger.warning('ICANN lockup has been activated.');
if (!this.state.hasAirstop && state.hasAirstop)
this.logger.warning('Airdrop claims have been disabled.');
this.state = state;
}
/**
* Spend and update inputs (checkpoints only).
* @private
* @param {Block} block
* @param {ChainEntry} prev
* @param {DeploymentState} state
* @returns {Promise<CoinView>}
*/
async updateInputs(block, prev, state) {
const view = new CoinView();
const height = prev.height + 1;
assert(block.treeRoot.equals(this.db.treeRoot()));
for (let i = 0; i < block.txs.length; i++) {
const tx = block.txs[i];
if (i === 0) {
assert(view.bits.spend(this.db.field, tx));
} else {
assert(await view.spendInputs(this.db, tx),
'BUG: Spent inputs in historical data!');
}
await this.verifyCovenants(tx, view, height, state.nameFlags);
view.addTX(tx, height);
}
return view;
}
/**
* Check block transactions for all things pertaining
* to inputs. This function is important because it is
* what actually fills the coins into the block. This
* function will check the block reward, the sigops,
* the tx values, and execute and verify the scripts (it
* will attempt to do this on the worker pool). If
* `checkpoints` is enabled, it will skip verification
* for historical data.
* @private
* @see TX#verifyInputs
* @see TX#verify
* @param {Block} block
* @param {ChainEntry} prev
* @param {DeploymentState} state
* @returns {Promise<CoinView>}
*/
async verifyInputs(block, prev, state) {
const network = this.network;
const view = new CoinView();
const height = prev.height + 1;
const interval = network.halvingInterval;
let sigops = 0;
let reward = 0;
// Check the name tree root.
if (!block.treeRoot.equals(this.db.treeRoot())) {
throw new VerifyError(block,
'invalid',
'bad-tree-root',
100);
}
// Check all transactions
for (let i = 0; i < block.txs.length; i++) {
const tx = block.txs[i];
// Ensure tx is not double spending an output.
if (i === 0) {
if (!view.bits.spend(this.db.field, tx)) {
throw new VerifyError(block,
'invalid',
'bad-txns-bits-missingorspent',
100);
}
} else {
if (!await view.spendInputs(this.db, tx)) {
throw new VerifyError(block,
'invalid',
'bad-txns-inputs-missingorspent',
100);
}
// Verify sequence locks.
const valid = await this.verifyLocks(prev, tx, view, state.lockFlags);
if (!valid) {
throw new VerifyError(block,
'invalid',
'bad-txns-nonfinal',
100);
}
}
// Count sigops.
sigops += tx.getSigops(view);
if (sigops > consensus.MAX_BLOCK_SIGOPS) {
throw new VerifyError(block,
'invalid',
'bad-blk-sigops',
100);
}
// Contextual sanity checks.
const [fee, reason, score] = tx.checkInputs(view, height, network);
if (fee === -1) {
throw new VerifyError(block,
'invalid',
reason,
score);
}
reward += fee;
if (reward > consensus.MAX_MONEY) {
throw new VerifyError(block,
'invalid',
'bad-cb-amount',
100);
}
// Verify covenants.
await this.verifyCovenants(tx, view, height, state.nameFlags);
// Add new coins.
view.addTX(tx, height);
}
// Make sure the miner isn't trying to conjure more coins.
reward += consensus.getReward(height, interval);
if (block.getClaimed() > reward) {
throw new VerifyError(block,
'invalid',
'bad-cb-amount',
0);
}
// Push onto verification queue.
const jobs = [];
for (let i = 0; i < block.txs.length; i++) {
const tx = block.txs[i];
jobs.push(tx.verifyAsync(view, state.flags, this.workers));
}
// Verify all txs in parallel.
const results = await Promise.all(jobs);
for (const result of results) {
if (!result) {
throw new VerifyError(block,
'invalid',
'mandatory-script-verify-flag-failed',
100);
}
}
return view;
}
/**
* Get main chain height for hash.
* @param {Hash} hash
* @returns {Promise<Number>}
*/
async getMainHeight(hash) {
const entry = await this.db.getEntry(hash);
if (!entry)
return -1;
// Must be the current chain.
if (!await this.db.isMainChain(entry))
return -1;
return entry.height;
}
/**
* Verify a renewal.
* @param {Hash} hash
* @param {Number} height
* @returns {Promise<Boolean>}
*/
async verifyRenewal(hash, height) {
assert(Buffer.isBuffer(hash));
assert((height >>> 0) === height);
// Cannot renew yet.
if (height < this.network.names.renewalMaturity)
return true;
// We require renewals to commit to a block
// within the past 6 months, to prove that
// the user still owns the key. This prevents
// people from presigning thousands of years
// worth of renewals. The block must be at
// least 400 blocks back to prevent the
// possibility of a reorg invalidating the
// covenant.
const entry = await this.db.getEntry(hash);
if (!entry)
return false;
// Must be the current chain.
if (!await this.db.isMainChain(entry))
return false;
// Make sure it's a mature block (unlikely to be reorged).
if (entry.height > height - this.network.names.renewalMaturity)
return false;
// The block committed to must be
// no older than 6 months.
if (entry.height < height - this.network.names.renewalPeriod)
return false;
return true;
}
/**
* Verify covenants.
* @param {TX} tx
* @param {CoinView} view
* @param {Number} height
* @param {Number} nameFlags
*/
async verifyCovenants(tx, view, height, nameFlags) {
assert(tx);
assert(view instanceof CoinView);
assert((height >>> 0) === height);
assert(typeof nameFlags === 'number');
const {types} = rules;
const network = this.network;
for (let i = 0; i < tx.outputs.length; i++) {
const output = tx.outputs[i];
const {covenant} = output;
if (!covenant.isName())
continue;
// BID and REDEEM covenants do not update the NameState.
// Therefore, if we are still inside checkpoints we can simply
// assume these covenants are valid without checking anything,
// or even getting and decoding the NameState from the tree.
// We could skip checks for ALL covenant types under checkpoints,
// but since the other types modify the NameState we still
// need to get the data, and the checks themselves are cheap.
if (this.isHistoricalHeight(height)) {
if (covenant.isBid() || covenant.isRedeem())
continue;
}
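// Every name covenant commits to the name hash (item 0) and a
// height (item 1), the `start` used by the nonlocal checks below.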
const nameHash = covenant.getHash(0);
const start = covenant.getU32(1);
const ns = await view.getNameState(this.db, nameHash);
if (ns.isNull()) {
if (!covenant.isClaim() && !covenant.isOpen())
throw new Error('Database inconsistency.');
const name = covenant.get(2);
ns.set(name, height);
}
// Check for name expiration/revocation.
// Note that claimed names never expire
// before the reservation period ends.
// However, they _can_ be revoked.
ns.maybeExpire(height, network);
// Calculate the current state.
const state = ns.state(height, network);
// none -> claim
if (covenant.isClaim()) {
const flags = covenant.getU8(3);
const weak = (flags & 1) !== 0;
// Claims can be re-claimed any time
// before registration. This is required
// in order for our emergency soft-forks
// to truly behave as _soft_ forks. Once
// re-claimed, the locktime resets and
// they re-enter the LOCKED state. Note
// that a newer claim invalidates the
// old output by committing to a higher
// height (will fail with nonlocal).
const valid = state === states.OPENING
|| state === states.LOCKED
|| (state === states.CLOSED && !ns.registered);
if (!valid) {
throw new VerifyError(tx,
'invalid',
'bad-claim-state',
100);
}
// Can only claim reserved names.
// Once a reserved name is revoked,
// it is no longer claimable.
if (ns.expired || !rules.isReserved(nameHash, height, network)) {
throw new VerifyError(tx,
'invalid',
'bad-claim-notreserved',
100);
}
// Once the fork is active, we reject
// any weak algorithms (i.e. RSA-1024).
// Any future emergency soft-forks should
// also be included below this check.
if ((nameFlags & VERIFY_COVENANTS_HARDENED) && weak) {
throw new VerifyError(tx,
'invalid',
'bad-claim-algorithm',
100);
}
// Check commitment hash.
const block = covenant.getHash(4);
const claimed = await this.getMainHeight(block);
// Implicitly checks for `-1`.
if (claimed !== covenant.getU32(5)) {
throw new VerifyError(tx,
'invalid',
'bad-claim-commit-height',
100);
}
// Implicitly disallows the genesis block.
if (claimed <= ns.claimed) {
throw new VerifyError(tx,
'invalid',
'bad-claim-commit-hash',
100);
}
assert(claimed >= 1);
// Handle inflation-fixing soft-fork.
if (height >= network.deflationHeight) {
const {claimFrequency} = network.names;
// Require claim height to be 1 on
// initial claims. This makes some
// non-contextual verification easier.
if (ns.owner.isNull()) {
if (claimed !== 1) {
throw new VerifyError(tx,
'invalid',
'bad-claim-height',
0);
}
}
// Limit the frequency of re-claims.
if (!ns.owner.isNull() && height < ns.height + claimFrequency) {
throw new VerifyError(tx,
'invalid',
'bad-claim-frequency',
0);
}
// Allow replacement, but require the
// same fee, which is then miner-burned.
if (!ns.owner.isNull()) {
const coin = await this.getCoin(ns.owner.hash, ns.owner.index);
if (!coin || output.value !== coin.value) {
throw new VerifyError(tx,
'invalid',
'bad-claim-value',
0);
}
}
}
ns.setHeight(height);
ns.setRenewal(height);
ns.setClaimed(claimed);
ns.setValue(0);
ns.setOwner(tx.outpoint(i));
ns.setHighest(0);
ns.setWeak(weak);
continue;
}
assert(!tx.isCoinbase());
// none/redeem/open -> open
if (covenant.isOpen()) {
if (state !== states.OPENING) {
throw new VerifyError(tx,
'invalid',
'bad-open-state',
100);
}
// Only one open transaction can ever exist.
if (ns.height !== height) {
throw new VerifyError(tx,
'invalid',
'bad-open-multiple',
100);
}
// Cannot bid on a reserved name.
if (!ns.expired && rules.isReserved(nameHash, height, network)) {
throw new VerifyError(tx,
'invalid',
'bad-open-reserved',
100);
}
// Make sure locked up names are not opened if ICANN LOCKUP has
// activated.
const isLockUpActive = nameFlags & VERIFY_COVENANTS_LOCKUP;
if (isLockUpActive && rules.isLockedUp(nameHash, height, network)) {
throw new VerifyError(tx,
'invalid',
'bad-open-lockedup',
100);
}
// On mainnet, names are released on a
// weekly basis for the first year.
if (!rules.hasRollout(nameHash, height, network)) {
throw new VerifyError(tx,
'invalid',
'bad-open-rollout',
100);
}
continue;
}
// none/redeem/open -> bid
if (covenant.isBid()) {
if (state !== states.BIDDING) {
throw new VerifyError(tx,
'invalid',
'bad-bid-state',
100);
}
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-bid-height',
100);
}
continue;
}
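// The remaining covenant types spend an existing name output,
// which must sit at the same index in the inputs array.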
assert(i < tx.inputs.length);
const {prevout} = tx.inputs[i];
switch (covenant.type) {
// bid -> reveal
case types.REVEAL: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-reveal-nonlocal',
100);
}
// Early reveals? No.
if (state !== states.REVEAL) {
throw new VerifyError(tx,
'invalid',
'bad-reveal-state',
100);
}
if (ns.owner.isNull() || output.value > ns.highest) {
ns.setValue(ns.highest);
ns.setOwner(tx.outpoint(i));
ns.setHighest(output.value);
} else if (output.value > ns.value) {
ns.setValue(output.value);
}
break;
}
// reveal -> redeem
case types.REDEEM: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-redeem-nonlocal',
100);
}
// Allow participants to get their
// money out, even in a revoked state.
if (state < states.CLOSED) {
throw new VerifyError(tx,
'invalid',
'bad-redeem-state',
100);
}
// Must be the loser in order
// to redeem the money now.
if (prevout.equals(ns.owner)) {
throw new VerifyError(tx,
'invalid',
'bad-redeem-owner',
100);
}
break;
}
// claim/reveal -> register
case types.REGISTER: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-register-nonlocal',
100);
}
if (state !== states.CLOSED) {
throw new VerifyError(tx,
'invalid',
'bad-register-state',
100);
}
const data = covenant.get(2);
const hash = covenant.getHash(3);
// Verify block hash for renewal.
if (!await this.verifyRenewal(hash, height)) {
throw new VerifyError(tx,
'invalid',
'bad-register-renewal',
100);
}
// Must be the winner in
// order to redeem the name.
if (!prevout.equals(ns.owner)) {
throw new VerifyError(tx,
'invalid',
'bad-register-owner',
100);
}
// Must match the second highest bid.
if (output.value !== ns.value) {
throw new VerifyError(tx,
'invalid',
'bad-register-value',
100);
}
// For claimed names: if the keys used in
// the proof were somehow compromised, the
// name becomes locked until the reservation
// period ends. Note that this is the same
// code path that can be used for emergency
// soft-forks in the case that a large name
// registrar's keys are compromised.
if (ns.isClaimable(height, network)) {
// Soft-fork #1 (RSA hardening).
if ((nameFlags & VERIFY_COVENANTS_HARDENED) && ns.weak) {
throw new VerifyError(tx,
'invalid',
'bad-register-state',
100);
}
// Emergency soft-forks go here.
// Use only to prevent sky from falling.
//
// A vision for an emergency soft-fork:
//
// 1. A list of compromised DNSKEYs is collected
// out of band.
// 2. The chain is scanned on first boot in order
// to find proofs which are vulnerable. The
// relevant names are marked as such.
// - Pruned nodes and nodes without witness
// data will unfortunately need to re-sync.
// 3. Any proof published before the flag day
// is also marked in this way if it contains
// a vulnerable key.
// 4. At soft-fork activation, the "vulnerable"
// check will take place here. This function
// should return true for any name that was
// redeemed with a vulnerable key.
//
// To future generations:
// PUT THE VULNERABLE KEY CHECK HERE!
}
ns.setRegistered(true);
ns.setOwner(tx.outpoint(i));
if (data.length > 0)
ns.setData(data);
ns.setRenewal(height);
break;
}
// update/renew/register/finalize -> update
case types.UPDATE: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-update-nonlocal',
100);
}
if (state !== states.CLOSED) {
throw new VerifyError(tx,
'invalid',
'bad-update-state',
100);
}
const data = covenant.get(2);
ns.setOwner(tx.outpoint(i));
if (data.length > 0)
ns.setData(data);
ns.setTransfer(0);
break;
}
// update/renew/register/finalize -> renew
case types.RENEW: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-renewal-nonlocal',
100);
}
if (state !== states.CLOSED) {
throw new VerifyError(tx,
'invalid',
'bad-renewal-state',
100);
}
const hash = covenant.getHash(2);
if (height < ns.renewal + network.names.treeInterval) {
throw new VerifyError(tx,
'invalid',
'bad-renewal-premature',
100);
}
if (!await this.verifyRenewal(hash, height)) {
throw new VerifyError(tx,
'invalid',
'bad-renewal',
100);
}
ns.setOwner(tx.outpoint(i));
ns.setTransfer(0);
ns.setRenewal(height);
ns.setRenewals(ns.renewals + 1);
break;
}
// update/renew/register/finalize -> transfer
case types.TRANSFER: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-transfer-nonlocal',
100);
}
if (state !== states.CLOSED) {
throw new VerifyError(tx,
'invalid',
'bad-transfer-state',
100);
}
ns.setOwner(tx.outpoint(i));
assert(ns.transfer === 0);
ns.setTransfer(height);
break;
}
// transfer -> finalize
case types.FINALIZE: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-finalize-nonlocal',
100);
}
if (state !== states.CLOSED) {
throw new VerifyError(tx,
'invalid',
'bad-finalize-state',
100);
}
assert(ns.transfer !== 0);
assert(network.names.transferLockup >= network.names.treeInterval);
if (height < ns.transfer + network.names.transferLockup) {
throw new VerifyError(tx,
'invalid',
'bad-finalize-maturity',
100);
}
const flags = covenant.getU8(3);
const weak = (flags & 1) !== 0;
const claimed = covenant.getU32(4);
const renewals = covenant.getU32(5);
const hash = covenant.getHash(6);
if (weak !== ns.weak
|| claimed !== ns.claimed
|| renewals !== ns.renewals) {
throw new VerifyError(tx,
'invalid',
'bad-finalize-statetransfer',
100);
}
if (!await this.verifyRenewal(hash, height)) {
throw new VerifyError(tx,
'invalid',
'bad-finalize-renewal',
100);
}
ns.setOwner(tx.outpoint(i));
ns.setTransfer(0);
ns.setRenewal(height);
ns.setRenewals(ns.renewals + 1);
break;
}
// register/update/renew/transfer/finalize -> revoke
case types.REVOKE: {
if (start !== ns.height) {
throw new VerifyError(tx,
'invalid',
'bad-revoke-nonlocal',
100);
}
if (state !== states.CLOSED) {
throw new VerifyError(tx,
'invalid',
'bad-revoke-state',
100);
}
assert(ns.revoked === 0);
ns.setRevoked(height);
ns.setTransfer(0);
ns.setData(null);
break;
}
default: {
assert.fail('Invalid covenant type.');
break;
}
}
}
return;
}
/**
* Find the block at which a fork occurred.
* @private
* @param {ChainEntry} fork - The current chain.
* @param {ChainEntry} longer - The competing chain.
* @returns {Promise<ChainEntry>}
*/
async findFork(fork, longer) {
while (!fork.hash.equals(longer.hash)) {
while (longer.height > fork.height) {
longer = await this.getPrevious(longer);
if (!longer)
throw new Error('No previous entry for new tip.');
}
if (fork.hash.equals(longer.hash))
return fork;
fork = await this.getPrevious(fork);
if (!fork)
throw new Error('No previous entry for old tip.');
}
return fork;
}
/**
* Reorganize the blockchain (connect and disconnect inputs).
* Called when a competing chain with a higher chainwork
* is received.
* @private
* @param {ChainEntry} competitor - The competing chain's tip.
* @returns {Promise<ChainEntry>} - Fork block.
*/
async reorganize(competitor) {
const tip = this.tip;
const fork = await this.findFork(tip, competitor);
assert(fork, 'No free space or data corruption.');
// Blocks to disconnect.
const disconnect = [];
let entry = tip;
while (!entry.hash.equals(fork.hash)) {
disconnect.push(entry);
entry = await this.getPrevious(entry);
assert(entry);
}
// Blocks to connect.
const connect = [];
entry = competitor;
while (!entry.hash.equals(fork.hash)) {
connect.push(entry);
entry = await this.getPrevious(entry);
assert(entry);
}
// Disconnect blocks/txs.
for (let i = 0; i < disconnect.length; i++) {
const entry = disconnect[i];
await this.disconnect(entry);
}
// Connect blocks/txs.
// We don't want to connect the new tip here.
// That will be done outside in setBestChain.
for (let i = connect.length - 1; i >= 1; i--) {
const entry = connect[i];
try {
await this.reconnect(entry);
} catch (err) {
if (err.type === 'VerifyError') {
if (!err.malleated) {
while (i--)
this.setInvalid(connect[i].hash);
}
if (this.tip.chainwork.lte(tip.chainwork))
await this.unreorganize(fork, tip);
}
throw err;
}
}
this.logger.warning(
'Chain reorganization: old=%x(%d) new=%x(%d)',
tip.hash,
tip.height,
competitor.hash,
competitor.height
);
await this.emitAsync('reorganize', tip, competitor, fork);
return fork;
}
/**
* Revert a failed reorganization.
* @private
* @param {ChainEntry} fork - The common ancestor.
* @param {ChainEntry} last - The previous valid tip.
* @returns {Promise}
*/
async unreorganize(fork, last) {
const tip = this.tip;
// Blocks to disconnect.
const disconnect = [];
let entry = tip;
while (!entry.hash.equals(fork.hash)) {
disconnect.push(entry);
entry = await this.getPrevious(entry);
assert(entry);
}
// Blocks to connect.
const connect = [];
entry = last;
while (!entry.hash.equals(fork.hash)) {
connect.push(entry);
entry = await this.getPrevious(entry);
assert(entry);
}
// Disconnect blocks/txs.
for (let i = 0; i < disconnect.length; i++) {
const entry = disconnect[i];
await this.disconnect(entry);
}
// Connect blocks/txs.
for (let i = connect.length - 1; i >= 0; i--) {
const entry = connect[i];
await this.reconnect(entry);
}
this.logger.warning(
'Chain un-reorganization: old=%x(%d) new=%x(%d)',
tip.hash,
tip.height,
last.hash,
last.height
);
// Treat as a reorganize event.
await this.emitAsync('reorganize', tip, last, fork);
}
/**
* Reorganize the blockchain for SPV. This
* will reset the chain to the fork block.
* @private
* @param {ChainEntry} competitor - The competing chain's tip.
* @returns {Promise}
*/
async reorganizeSPV(competitor) {
const tip = this.tip;
const fork = await this.findFork(tip, competitor);
assert(fork, 'No free space or data corruption.');
// Buffer disconnected blocks.
const disconnect = [];
let entry = tip;
while (!entry.hash.equals(fork.hash)) {
disconnect.push(entry);
entry = await this.getPrevious(entry);
assert(entry);
}
// Reset the main chain back
// to the fork block, causing
// us to redownload the blocks
// on the new main chain.
await this._reset(fork.hash, false);
// Emit disconnection events now that
// the chain has successfully reset.
for (const entry of disconnect) {
const headers = entry.toHeaders();
const view = new CoinView();
await this.emitAsync('disconnect', entry, headers, view);
}
this.logger.warning(
'SPV reorganization: old=%x(%d) new=%x(%d)',
tip.hash,
tip.height,
competitor.hash,
competitor.height
);
this.logger.warning(
'Chain replay from height %d necessary.',
fork.height);
return this.emitAsync('reorganize', tip, competitor, fork);
}
/**
* Disconnect an entry from the chain (updates the tip).
* @param {ChainEntry} entry
* @returns {Promise}
*/
async disconnect(entry) {
let block = await this.getBlock(entry.hash);
if (!block) {
if (!this.options.spv)
throw new Error('Block not found.');
block = entry.toHeaders();
}
const prev = await this.getPrevious(entry);
const view = await this.db.disconnect(entry, block);
assert(prev);
this.tip = prev;
this.height = prev.height;
this.emit('tip', prev);
return this.emitAsync('disconnect', entry, block, view);
}
/**
* Reconnect an entry to the chain (updates the tip).
* This will do contextual-verification on the block
* (necessary because we cannot validate the inputs
* in alternate chains when they come in).
* @param {ChainEntry} entry
* @returns {Promise}
*/
async reconnect(entry) {
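// No POW/body flags here: verifyContext() still checks
// inputs, covenants and scripts.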
const flags = common.flags.VERIFY_NONE;
let block = await this.getBlock(entry.hash);
if (!block) {
if (!this.options.spv)
throw new Error('Block not found.');
block = entry.toHeaders();
}
const prev = await this.getPrevious(entry);
assert(prev);
let view, state;
try {
[view, state] = await this.verifyContext(block, prev, flags);
} catch (err) {
if (err.type === 'VerifyError') {
if (!err.malleated)
this.setInvalid(entry.hash);
this.logger.warning(
'Tried to reconnect invalid block: %x (%d).',
entry.hash, entry.height);
}
throw err;
}
await this.db.reconnect(entry, block, view);
this.tip = entry;
this.height = entry.height;
this.setDeploymentState(state);
this.emit('tip', entry);
this.emit('reconnect', entry, block);
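// The name tree is committed once per tree interval; announce the new root.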
if ((entry.height % this.network.names.treeInterval) === 0)
this.emit('tree commit', this.db.tree.rootHash(), entry, block);
return this.emitAsync('connect', entry, block, view);
}
/**
* Set the best chain. This is called on every incoming
* block with greater chainwork than the current tip.
* @private
* @param {ChainEntry} entry
* @param {Block} block
* @param {ChainEntry} prev
* @param {Number} flags
* @returns {Promise}
*/
async setBestChain(entry, block, prev, flags) {
const tip = this.tip;
let fork = null;
// A higher fork has arrived.
// Time to reorganize the chain.
if (!entry.prevBlock.equals(this.tip.hash)) {
try {
// Do as much verification
// as we can before reorganizing.
await this.verify(block, prev, flags);
} catch (err) {
if (err.type === 'VerifyError') {
if (!err.malleated)
this.setInvalid(entry.hash);
this.logger.warning(
'Tried to connect invalid block: %x (%d).',
entry.hash,