diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -157,6 +157,7 @@ 'abc-p2p-activation.py', 'abc-p2p-fullblocktest.py', 'abc-rpc.py', + 'abc-ec.py', 'mempool-accept-txn.py', ] if ENABLE_ZMQ: diff --git a/qa/rpc-tests/abc-ec.py b/qa/rpc-tests/abc-ec.py new file mode 100755 --- /dev/null +++ b/qa/rpc-tests/abc-ec.py @@ -0,0 +1,393 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2017 The Bitcoin developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +This test checks the new consensus behavior. +It is derived from the much more complex p2p-fullblocktest. +""" + +# TODO: remove unnecessary code from the fullblocktest + +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import * +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.blocktools import * +import time +from test_framework.key import CECKey +from test_framework.script import * +from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, + MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT) + +import unittest + +# far into the past +UAHF_START_TIME = 30000000 + + +class PreviousSpendableOutput(object): + + def __init__(self, tx=CTransaction(), n=-1): + self.tx = tx + self.n = n # the output we're spending + +# TestNode: A peer we use to send messages to bitcoind, and store responses. 
+ + +class TestNode(SingleNodeConnCB): + + def __init__(self): + self.last_sendcmpct = None + self.last_cmpctblock = None + self.last_getheaders = None + self.last_headers = None + SingleNodeConnCB.__init__(self) + + def on_sendcmpct(self, conn, message): + self.last_sendcmpct = message + + def on_cmpctblock(self, conn, message): + self.last_cmpctblock = message + self.last_cmpctblock.header_and_shortids.header.calc_sha256() + + def on_getheaders(self, conn, message): + self.last_getheaders = message + + def on_headers(self, conn, message): + self.last_headers = message + for x in self.last_headers.headers: + x.calc_sha256() + + def clear_block_data(self): + with mininode_lock: + self.last_sendcmpct = None + self.last_cmpctblock = None + + +class FullBlockTest(ComparisonTestFramework): + + # Can either run this test as 1 node with expected answers, or two and compare them. + # Change the "outcome" variable from each TestInstance object to only do + # the comparison. + + def __init__(self): + super().__init__() + self.excessive_block_size = 8 * ONE_MEGABYTE + self.num_nodes = 4 + self.block_heights = {} + self.coinbase_key = CECKey() + self.coinbase_key.set_secretbytes(b"fatstacks") + self.coinbase_pubkey = self.coinbase_key.get_pubkey() + self.tip = None + self.blocks = {} + + def sync_all(self): + if self.is_network_split: + sync_blocks(self.nodes[:2], timeout=15) + sync_blocks(self.nodes[2:], timeout=15) + sync_mempools(self.nodes[:2]) + sync_mempools(self.nodes[2:]) + else: + sync_blocks(self.nodes) + sync_mempools(self.nodes) + + def setup_network(self): + self.excessive_block_size_2 = 16 * ONE_MEGABYTE + self.is_network_split = True + self.array_opts_node_0 = ['-debug', + '-norelaypriority', + '-whitelist=127.0.0.1', + '-limitancestorcount=9999', + '-limitancestorsize=9999', + '-limitdescendantcount=9999', + '-limitdescendantsize=9999', + '-maxmempool=999', + "-uahfstarttime=%d" % UAHF_START_TIME, + "-excessiveblocksize=%d" + % self.excessive_block_size] + 
self.array_opts = ['-debug', + '-norelaypriority', + '-whitelist=127.0.0.1', + '-limitancestorcount=9999', + '-limitancestorsize=9999', + '-limitdescendantcount=9999', + '-limitdescendantsize=9999', + '-maxmempool=999', + "-uahfstarttime=%d" % UAHF_START_TIME, + "-excessiveblocksize=%d" + % self.excessive_block_size] + + self.extra_args = [ + self.array_opts_node_0, self.array_opts_node_0, self.array_opts, self.array_opts] + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, + self.extra_args, + binary=[self.options.testbinary, self.options.testbinary, self.options.testbinary, self.options.testbinary]) + + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 2, 3) + self.sync_all() + + def add_options(self, parser): + super().add_options(parser) + parser.add_option( + "--runbarelyexpensive", dest="runbarelyexpensive", default=True) + + def run_test(self): + self.test = TestManager(self, self.options.tmpdir) + self.test.add_all_connections(self.nodes) + # Start up network handling in another thread + NetworkThread().start() + # Set the blocksize to 16MB as initial condition + self.nodes[0].setexcessiveblock(self.excessive_block_size_2) + self.test.run() + + def add_transactions_to_block(self, block, tx_list): + [tx.rehash() for tx in tx_list] + block.vtx.extend(tx_list) + + # this is a little handier to use than the version in blocktools.py + def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = create_transaction(spend_tx, n, b"", value, script) + return tx + + # sign a transaction, using the key we know about + # this signs input 0 in tx, which is assumed to be spending output n in + # spend_tx + def sign_tx(self, tx, spend_tx, n): + scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend + tx.vin[0].scriptSig = CScript() + return + sighash = SignatureHashForkId( + spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue) + 
tx.vin[0].scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) + + def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = self.create_tx(spend_tx, n, value, script) + self.sign_tx(tx, spend_tx, n) + tx.rehash() + return tx + + def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True, submit=True, base_hash=None, base_time=None, base_height=None): + """ + Create a block on top of self.tip, and advance self.tip to point to the new block + if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend + output, and rest will go to fees. + """ + if self.tip == None: + base_block_hash = self.genesis_hash + block_time = int(time.time()) + 1 + else: + if base_hash == None and base_time == None: + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + else: + base_block_hash = base_hash + block_time = base_time + # First create the coinbase + if base_height == None: + height = self.block_heights[base_block_hash] + 1 + else: + height = base_height + coinbase = create_coinbase(height, self.coinbase_pubkey) + coinbase.vout[0].nValue += additional_coinbase_value + if (spend != None): + coinbase.vout[0].nValue += spend.tx.vout[ + spend.n].nValue - 1 # all but one satoshi to fees + coinbase.rehash() + block = create_block(base_block_hash, coinbase, block_time) + spendable_output = None + if (spend != None): + tx = CTransaction() + # no signature yet + tx.vin.append( + CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) + # We put some random data into the first transaction of the chain + # to randomize ids + tx.vout.append( + CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE]))) + if script == None: + tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) + else: + tx.vout.append(CTxOut(1, script)) + spendable_output = PreviousSpendableOutput(tx, 0) + + # Now sign it if 
necessary + scriptSig = b"" + scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend + scriptSig = CScript([OP_TRUE]) + else: + # We have to actually sign it + sighash = SignatureHashForkId( + spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue) + scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) + tx.vin[0].scriptSig = scriptSig + # Now add the transaction to the block + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + if spendable_output != None and block_size > 0: + while len(block.serialize()) < block_size: + tx = CTransaction() + script_length = block_size - len(block.serialize()) - 79 + if script_length > 510000: + script_length = 500000 + tx_sigops = min( + extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) + extra_sigops -= tx_sigops + script_pad_len = script_length - tx_sigops + script_output = CScript( + [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) + tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append( + CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) + spendable_output = PreviousSpendableOutput(tx, 0) + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + # Make sure the math above worked out to produce the correct block size + # (the math will fail if there are too many transactions in the block) + assert_equal(len(block.serialize()), block_size) + # Make sure all the requested sigops have been included + assert_equal(extra_sigops, 0) + if solve: + block.solve() + if submit: + self.tip = block + self.block_heights[block.sha256] = height + assert number not in self.blocks + self.blocks[number] = block + return block + + def get_tests(self): + self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) 
+ self.block_heights[self.genesis_hash] = 0 + spendable_outputs = [] + + # save the current tip so it can be spent by a later block + def save_spendable_output(): + spendable_outputs.append(self.tip) + + # get an output that we previously marked as spendable + def get_spendable_output(): + return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) + + # returns a test case that asserts that the current tip was accepted + def accepted(): + return TestInstance([[self.tip, True]]) + + # returns a test case that asserts that the current tip was rejected + def rejected(reject=None): + if reject is None: + return TestInstance([[self.tip, False]]) + else: + return TestInstance([[self.tip, reject]]) + + # move the tip back to a previous block + def tip(number): + self.tip = self.blocks[number] + + # adds transactions to the block and updates state + def update_block(block_number, new_transactions): + block = self.blocks[block_number] + self.add_transactions_to_block(block, new_transactions) + old_sha256 = block.sha256 + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + # Update the internal state just like in next_block + self.tip = block + if block.sha256 != old_sha256: + self.block_heights[ + block.sha256] = self.block_heights[old_sha256] + del self.block_heights[old_sha256] + self.blocks[block_number] = block + return block + + # shorthand for functions + block = self.next_block + + # Create a new block + block(0) + save_spendable_output() + yield accepted() + + # Now we need that block to mature so we can spend the coinbase. + test = TestInstance(sync_every_block=False) + for i in range(99): + block(5000 + i) + test.blocks_and_transactions.append([self.tip, True]) + save_spendable_output() + yield test + + # collect spendable outputs now to avoid cluttering the code later on + out = [] + for i in range(100): + out.append(get_spendable_output()) + + # Let's build some blocks and test them. 
+ for i in range(8): + n = i + 1 + block(n, spend=out[i], block_size=n * ONE_MEGABYTE) + yield accepted() + + # block of maximal size + block(9, spend=out[8], block_size=self.excessive_block_size) + yield accepted() + + # Consensus code, the network is split: + # Node 0 and 1 are connected + # Node 2 and 3 are connected + assert_equal(self.nodes[0].getblockcount(), 109) + assert_equal(self.nodes[1].getblockcount(), 109) + assert_equal(self.nodes[2].getblockcount(), 109) + assert_equal(self.nodes[3].getblockcount(), 109) + + # Node 2 and 3 will continue mining the chain + new_blocks = self.nodes[2].generate(40) + self.sync_all() + assert_equal(self.nodes[0].getblockcount(), 109) + assert_equal(self.nodes[1].getblockcount(), 109) + assert_equal(self.nodes[2].getblockcount(), 149) + assert_equal(self.nodes[3].getblockcount(), 149) + node_2_chain_hash = self.nodes[2].getbestblockhash() + + # Node 0 will follow a chain that has bigger blocks than the + # excessive_block_size (This is possible because of the ec-policy) + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + block_height = self.block_heights[base_block_hash] + 1 + block_temp = block( + 10, spend=out[9], block_size=self.excessive_block_size + 100, solve=False, submit=False, base_hash=base_block_hash, base_time=block_time, base_height=block_height) + block_temp.solve() + self.nodes[0].submitblock(ToHex(block_temp)) + # The block was submitted + assert_equal(self.nodes[0].getblockcount(), 110) + + self.nodes[0].generate(2) + assert_equal(self.nodes[0].getblockcount(), 112) + big_blocks_hash = self.nodes[0].getbestblockhash() + node_1_last_hash = self.nodes[1].getbestblockhash() + assert_equal(self.nodes[1].getblockcount(), 109) + + # Node 0 and 1 can not sync because they have different + # excessive_block_size + try: + self.sync_all() + except AssertionError: + assert (True) + assert_equal(self.nodes[0].getbestblockhash(), big_blocks_hash) + assert_equal(self.nodes[1].getbestblockhash(), 
node_1_last_hash) + assert_equal(self.nodes[1].getblockcount(), 109) + + self.nodes[0].generate(4) + big_blocks_hash = self.nodes[0].getbestblockhash() + self.sync_all() + assert_equal(self.nodes[0].getbestblockhash(), big_blocks_hash) + assert_equal(self.nodes[1].getbestblockhash(), big_blocks_hash) + + +if __name__ == '__main__': + FullBlockTest().main() diff --git a/src/Makefile.am b/src/Makefile.am --- a/src/Makefile.am +++ b/src/Makefile.am @@ -119,6 +119,8 @@ noui.h \ policy/fees.h \ policy/policy.h \ + policy/ec/blocksizepolicyinterface.h \ + policy/ec/ecblocksize.h \ pow.h \ protocol.h \ random.h \ @@ -199,6 +201,7 @@ noui.cpp \ policy/fees.cpp \ policy/policy.cpp \ + policy/ec/ecblocksize.cpp \ pow.cpp \ rest.cpp \ rpc/abc.cpp \ diff --git a/src/chain.h b/src/chain.h --- a/src/chain.h +++ b/src/chain.h @@ -146,10 +146,15 @@ BLOCK_HAVE_UNDO = 16, BLOCK_HAVE_MASK = BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO, - //!< stage after last reached validness failed - BLOCK_FAILED_VALID = 32, - //!< descends from failed block - BLOCK_FAILED_CHILD = 64, + BLOCK_EXCESSIVE = + 32, // BU: This block is bigger than what we really want to accept. + + // //!< stage after last reached validness failed + // BLOCK_FAILED_VALID = 32, + // //!< descends from failed block + // BLOCK_FAILED_CHILD = 64, + BLOCK_FAILED_VALID = 64, //! stage after last reached validness failed + BLOCK_FAILED_CHILD = 128, //! 
descends from failed block BLOCK_FAILED_MASK = BLOCK_FAILED_VALID | BLOCK_FAILED_CHILD, }; diff --git a/src/config.h b/src/config.h --- a/src/config.h +++ b/src/config.h @@ -5,8 +5,9 @@ #ifndef BITCOIN_CONFIG_H #define BITCOIN_CONFIG_H +#include "policy/ec/blocksizepolicyinterface.h" +#include "validation.h" #include - #include class CChainParams; @@ -18,15 +19,41 @@ virtual bool SetUAHFStartTime(int64_t uahfStartTime) = 0; virtual int64_t GetUAHFStartTime() const = 0; virtual const CChainParams &GetChainParams() const = 0; + + virtual void SetBlockPolicy(std::unique_ptr) = 0; + virtual void UpdateConsensusBlockPolicy() const = 0; + + virtual uint64_t GetExcessiveAcceptDepth() const = 0; + virtual uint64_t GetExcessiveBlockChainReset() const = 0; + + // BU: CheckExcessive(const CBlock &block, uint64_t blockSize, uint64_t + // nSigOps, uint64_t nTx, uint64_t largestTx) + // Check whether this block is bigger in some metric than we really want to + // accept + virtual bool CheckExcessive(const CBlock &block) const = 0; }; class GlobalConfig final : public Config { public: + GlobalConfig(); + bool SetMaxBlockSize(uint64_t maxBlockSize); uint64_t GetMaxBlockSize() const; bool SetUAHFStartTime(int64_t uahfStartTime); int64_t GetUAHFStartTime() const; const CChainParams &GetChainParams() const; + + void SetBlockPolicy(std::unique_ptr) override; + void UpdateConsensusBlockPolicy() const override; + bool CheckExcessive(const CBlock &block) const override; + + uint64_t GetExcessiveAcceptDepth() const override; + uint64_t GetExcessiveBlockChainReset() const override; + +private: + std::unique_ptr blockPolicy; + uint64_t excessiveAcceptDepth; + uint64_t excessiveBlockChainReset; }; // Temporary woraround. 
diff --git a/src/config.cpp b/src/config.cpp --- a/src/config.cpp +++ b/src/config.cpp @@ -6,6 +6,19 @@ #include "chainparams.h" #include "consensus/consensus.h" #include "globals.h" +#include "policy/ec/ecblocksize.h" + +GlobalConfig::GlobalConfig() + : blockPolicy(new ECBlockSize(DEFAULT_MAX_BLOCK_SIZE)), + excessiveAcceptDepth(4), excessiveBlockChainReset(6 * 24) {} + +void GlobalConfig::SetBlockPolicy(std::unique_ptr p) { + blockPolicy = std::move(p); +} + +void GlobalConfig::UpdateConsensusBlockPolicy() const { + blockPolicy->updateConsensus(); +} bool GlobalConfig::SetMaxBlockSize(uint64_t maxBlockSize) { // Do not allow maxBlockSize to be set below historic 1MB limit @@ -13,12 +26,44 @@ if (maxBlockSize <= LEGACY_MAX_BLOCK_SIZE) { return false; } + if (blockPolicy != nullptr) { + blockPolicy->SetMaxBlockSize(maxBlockSize); + } nMaxBlockSize = maxBlockSize; return true; } +// BU: CheckExcessive(const CBlock &block, uint64_t blockSize, uint64_t nSigOps, +// uint64_t nTx, uint64_t largestTx) +// Right now we just want to verify the block's size +bool GlobalConfig::CheckExcessive(const CBlock &block) const { + // There is no way this block is of reasonable size. 
+ // This can be removed, the size it's already set in the nBlockSize variable + if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > + blockPolicy->GetMaxBlockSize()) { + return true; + } + + if (block.nBlockSize > blockPolicy->GetMaxBlockSize()) { + return true; + } + + return false; +} + +uint64_t GlobalConfig::GetExcessiveAcceptDepth() const { + return excessiveAcceptDepth; +} +uint64_t GlobalConfig::GetExcessiveBlockChainReset() const { + return excessiveBlockChainReset; +} + uint64_t GlobalConfig::GetMaxBlockSize() const { + // assert(blockSizePolicy); + if (blockPolicy != nullptr) { + return blockPolicy->GetMaxBlockSize(); + } return nMaxBlockSize; } diff --git a/src/policy/ec/blocksizepolicyinterface.h b/src/policy/ec/blocksizepolicyinterface.h new file mode 100644 --- /dev/null +++ b/src/policy/ec/blocksizepolicyinterface.h @@ -0,0 +1,20 @@ +// Copyright (c) 2017 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +#ifndef BITCOIN_BLOCKSIZEPOLICYINTERFACE_H +#define BITCOIN_BLOCKSIZEPOLICYINTERFACE_H + +#include + +class BlockSizePolicyInterface { +public: + // Hard limit for maximum block size. + virtual uint64_t GetMaxBlockSize() const = 0; + virtual void SetMaxBlockSize(uint64_t maxBlockSize) = 0; + virtual ~BlockSizePolicyInterface() = 0; + virtual void updateConsensus() = 0; +}; + +inline BlockSizePolicyInterface::~BlockSizePolicyInterface() {} + +#endif \ No newline at end of file diff --git a/src/policy/ec/ecblocksize.h b/src/policy/ec/ecblocksize.h new file mode 100644 --- /dev/null +++ b/src/policy/ec/ecblocksize.h @@ -0,0 +1,23 @@ +// Copyright (c) 2017 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+#ifndef BITCOIN_ECBLOCKSIZE_H +#define BITCOIN_ECBLOCKSIZE_H + +#include "policy/ec/blocksizepolicyinterface.h" + +class ECBlockSize : public BlockSizePolicyInterface { +public: + ECBlockSize(uint64_t maxBlockSize); + ~ECBlockSize(); + + uint64_t GetMaxBlockSize() const override; + + void SetMaxBlockSize(uint64_t maxBlockSize) override; + void updateConsensus() override; + +private: + mutable uint64_t nMaxBlockSize; +}; + +#endif \ No newline at end of file diff --git a/src/policy/ec/ecblocksize.cpp b/src/policy/ec/ecblocksize.cpp new file mode 100644 --- /dev/null +++ b/src/policy/ec/ecblocksize.cpp @@ -0,0 +1,27 @@ +// Copyright (c) 2017 The Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include "policy/ec/ecblocksize.h" +#include "consensus/consensus.h" +#include +#include + +ECBlockSize::~ECBlockSize() {} + +ECBlockSize::ECBlockSize(uint64_t maxBlockSize) : nMaxBlockSize(maxBlockSize) {} + +uint64_t ECBlockSize::GetMaxBlockSize() const { + return nMaxBlockSize; +} + +void ECBlockSize::SetMaxBlockSize(uint64_t maxBlockSize) { + nMaxBlockSize = maxBlockSize; +} + +void ECBlockSize::updateConsensus() { + auto temp = nMaxBlockSize; + nMaxBlockSize = nMaxBlockSize * 2; + std::cout << "maxblocksize was modified from " << temp << " to " + << nMaxBlockSize << std::endl; + // nMaxBlockSize = nMaxBlockSize; +} \ No newline at end of file diff --git a/src/primitives/block.h b/src/primitives/block.h --- a/src/primitives/block.h +++ b/src/primitives/block.h @@ -65,6 +65,9 @@ // memory only mutable bool fChecked; + mutable bool fExcessive; // BU: is the block "excessive" (bigger than this + // node prefers to accept) + mutable uint64_t nBlockSize; // BU: length of this block in bytes CBlock() { SetNull(); } @@ -85,6 +88,8 @@ CBlockHeader::SetNull(); vtx.clear(); fChecked = false; + fExcessive = false; + nBlockSize = 0; } CBlockHeader GetBlockHeader() const { 
diff --git a/src/validation.cpp b/src/validation.cpp --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2238,8 +2238,8 @@ nLastSetChain = nNow; } } catch (const std::runtime_error &e) { - return AbortNode(state, std::string("System error while flushing: ") + - e.what()); + return AbortNode( + state, std::string("System error while flushing: ") + e.what()); } return true; } @@ -2506,7 +2506,9 @@ * Return the tip of the chain with the most work in it, that isn't known to be * invalid (it's however far from certain to be valid). */ -static CBlockIndex *FindMostWorkChain() { +static CBlockIndex *FindMostWorkChain(const Config &config) { + uint64_t excessiveAcceptDepth = config.GetExcessiveAcceptDepth(); + uint64_t EXCESSIVE_BLOCK_CHAIN_RESET = config.GetExcessiveBlockChainReset(); do { CBlockIndex *pindexNew = nullptr; @@ -2523,6 +2525,18 @@ // is an optimization, as we know all blocks in it are valid already. CBlockIndex *pindexTest = pindexNew; bool fInvalidAncestor = false; + uint64_t depth = 0; + bool fFailedChain = false; + bool fMissingData = false; + bool fRecentExcessive = + false; // Has there been a excessive block within our accept depth? + // Was there an excessive block prior to our accept depth (if so we + // ignore the accept depth -- this chain has + // already been accepted as valid) + bool fOldExcessive = false; + // follow the chain all the way back to where it joins the current + // active chain. + while (pindexTest && !chainActive.Contains(pindexTest)) { assert(pindexTest->nChainTx || pindexTest->nHeight == 0); @@ -2532,36 +2546,100 @@ // to a chain unless we have all the non-active-chain parent blocks. 
bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); - if (fFailedChain || fMissingData) { - // Candidate chain is not usable (either invalid or missing - // data) - if (fFailedChain && - (pindexBestInvalid == nullptr || - pindexNew->nChainWork > pindexBestInvalid->nChainWork)) - pindexBestInvalid = pindexNew; - CBlockIndex *pindexFailed = pindexNew; - // Remove the entire chain from the set. - while (pindexTest != pindexFailed) { - if (fFailedChain) { - pindexFailed->nStatus |= BLOCK_FAILED_CHILD; - } else if (fMissingData) { - // If we're missing data, then add back to - // mapBlocksUnlinked, so that if the block arrives in - // the future we can try adding to - // setBlockIndexCandidates again. - mapBlocksUnlinked.insert( - std::make_pair(pindexFailed->pprev, pindexFailed)); - } - setBlockIndexCandidates.erase(pindexFailed); - pindexFailed = pindexFailed->pprev; - } - setBlockIndexCandidates.erase(pindexTest); - fInvalidAncestor = true; - break; + if (depth < excessiveAcceptDepth) { + // Unlimited: deny this candidate chain if there's a recent + // excessive block + fRecentExcessive |= + ((pindexTest->nStatus & BLOCK_EXCESSIVE) != 0); + } else { + // Unlimited: unless there is an even older excessive block + fOldExcessive |= ((pindexTest->nStatus & BLOCK_EXCESSIVE) != 0); } + + if (fFailedChain | fMissingData | fRecentExcessive) break; pindexTest = pindexTest->pprev; + depth++; + } + + // If there was a recent excessive block, check a certain distance + // beyond the acceptdepth to see if this chain + // has already seen an excessive block... if it has then allow the + // chain. + // This stops the client from always tracking excessiveDepth blocks + // behind the chain tip in a situation where + // lots of excessive blocks are being created. + // But after a while with no excessive blocks, we reset and our + // reluctance to accept an excessive block resumes + // on this chain. 
+ // An alternate algorithm would be to move the excessive block size up + // to match the size of the accepted block, + // but this changes a user-defined field and is awkward to code because + // block sizes are not saved. + if ((fRecentExcessive && !fOldExcessive) && + (depth < excessiveAcceptDepth + EXCESSIVE_BLOCK_CHAIN_RESET)) { + CBlockIndex *chain = pindexTest; + // skip accept depth blocks, we are looking for an older excessive + while (chain && (depth < excessiveAcceptDepth)) { + // ABC: should only check in the forked chain to not reupdate + // the consensus rules when not needed + // Old blocks are considered valid and we only need to update + // the consensus if the new blocks do not + // Accept the consensus rules + // while (chain && (depth < excessiveAcceptDepth) && + // !chainActive.Contains(pindexTest)) + chain = chain->pprev; + depth++; + } + + while (chain && (depth < excessiveAcceptDepth + + EXCESSIVE_BLOCK_CHAIN_RESET)) { + // ABC + // while (chain && (depth < excessiveAcceptDepth + + // EXCESSIVE_BLOCK_CHAIN_RESET) && + // !chainActive.Contains(pindexTest)) + fOldExcessive |= ((chain->nStatus & BLOCK_EXCESSIVE) != 0); + chain = chain->pprev; + depth++; + } + } + + // Conditions where we want to reject the chain + if (fFailedChain || fMissingData || + (fRecentExcessive && !fOldExcessive)) { + // Candidate chain is not usable (either invalid or missing + // data) + if (fFailedChain && + (pindexBestInvalid == nullptr || + pindexNew->nChainWork > pindexBestInvalid->nChainWork)) + pindexBestInvalid = pindexNew; + CBlockIndex *pindexFailed = pindexNew; + // Remove the entire chain from the set. + while (pindexTest != pindexFailed) { + if (fFailedChain) { + pindexFailed->nStatus |= BLOCK_FAILED_CHILD; + } else if (fMissingData || + (fRecentExcessive && !fOldExcessive)) { + // If we're missing data, then add back to + // mapBlocksUnlinked, so that if the block arrives in + // the future we can try adding to + // setBlockIndexCandidates again. 
+ mapBlocksUnlinked.insert( + std::make_pair(pindexFailed->pprev, pindexFailed)); + } + setBlockIndexCandidates.erase(pindexFailed); + pindexFailed = pindexFailed->pprev; + } + setBlockIndexCandidates.erase(pindexTest); + fInvalidAncestor = true; + } + if (!fInvalidAncestor) { + // ABC: + // if (fOldExcessive){ + // //One of the blocks is bigger than our consensus rules + // config.UpdateConsensusBlockPolicy(); + // } + return pindexNew; } - if (!fInvalidAncestor) return pindexNew; } while (true); } @@ -2730,7 +2808,7 @@ MemPoolConflictRemovalTracker mrt(mempool); CBlockIndex *pindexOldTip = chainActive.Tip(); if (pindexMostWork == nullptr) { - pindexMostWork = FindMostWorkChain(); + pindexMostWork = FindMostWorkChain(config); } // Whether we have anything to do at all. @@ -2964,6 +3042,7 @@ pindexNew->nDataPos = pos.nPos; pindexNew->nUndoPos = 0; pindexNew->nStatus |= BLOCK_HAVE_DATA; + if (block.fExcessive) pindexNew->nStatus |= BLOCK_EXCESSIVE; pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); setDirtyBlockIndex.insert(pindexNew); @@ -3164,20 +3243,37 @@ } // Size limits. - auto nMaxBlockSize = config.GetMaxBlockSize(); + // auto nMaxBlockSize = config.GetMaxBlockSize(); - // Bail early if there is no way this block is of reasonable size. - if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { - return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, - "size limits failed"); - } + // // Bail early if there is no way this block is of reasonable size. 
+ // if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { + // return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, + // "size limits failed"); + // } auto currentBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); - if (currentBlockSize > nMaxBlockSize) { - return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, - "size limits failed"); - } + + block.nBlockSize = currentBlockSize; + + // if (currentBlockSize > nMaxBlockSize) { + // return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, + // "size limits failed"); + // } + + // if (!IsBlockSizeOk(config, block)) { + // // TODO: set a new state for this blocks, we need to revalidate them + // // when our node is upgraded + // // TODO: set a warning message if the chain with a different + // consensus + // // is winning, and check what functions may need to be disabled until + // // the node is upgraded + // // TODO: remove the std::cout + // std::cout << "Block size is in the new state (WontValid or " + // "CantBeValidated), but we want to follow the chain if " + // "it's the one that have the most work" + // << std::endl; + // } // And a valid coinbase. if (!CheckCoinbase(*block.vtx[0], state, false)) { @@ -3230,6 +3326,9 @@ block.fChecked = true; } + // BU: Check whether this block exceeds what we want to relay. 
+ block.fExcessive = config.CheckExcessive(block); + return true; } @@ -4178,10 +4277,11 @@ boost::this_thread::interruption_point(); uiInterface.ShowProgress( _("Verifying blocks..."), - std::max( - 1, std::min(99, 100 - (int)(((double)(chainActive.Height() - - pindex->nHeight)) / - (double)nCheckDepth * 50)))); + std::max(1, + std::min(99, + 100 - (int)(((double)(chainActive.Height() - + pindex->nHeight)) / + (double)nCheckDepth * 50)))); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) { @@ -4501,6 +4601,15 @@ return nLoaded > 0; } +int chainContainsExcessive(const CBlockIndex *blk, unsigned int goBack = 0) { + if (goBack == 0) goBack = 4 + 6 * 24; + for (unsigned int i = 0; i < goBack; i++, blk = blk->pprev) { + if (!blk) break; // we hit the beginning + if (blk->nStatus & BLOCK_EXCESSIVE) return true; + } + return false; +} + static void CheckBlockIndex(const Consensus::Params &consensusParams) { if (!fCheckBlockIndex) { return; @@ -4664,8 +4773,13 @@ // is valid and we have all data for its parents, it must be in // setBlockIndexCandidates. chainActive.Tip() must also be there // even if some data has been pruned. - if (pindexFirstMissing == nullptr || - pindex == chainActive.Tip()) { + + // if (pindexFirstMissing == nullptr || + // BU: if the chain is excessive it won't be on the list of + // active chain candidates + if ((!chainContainsExcessive(pindex)) && + (pindexFirstMissing == nullptr || + pindex == chainActive.Tip())) { assert(setBlockIndexCandidates.count(pindex)); } // If some parent is missing, then it could be that this block @@ -4704,10 +4818,17 @@ // Can't be in mapBlocksUnlinked if we don't HAVE_DATA assert(!foundInUnlinked); } - if (pindexFirstMissing == nullptr) { - // We aren't missing data for any parent -- cannot be in - // mapBlocksUnlinked. 
- assert(!foundInUnlinked); + // if (pindexFirstMissing == nullptr) { + // // We aren't missing data for any parent -- cannot be in + // // mapBlocksUnlinked. + // assert(!foundInUnlinked); + // } + + // BU: blocks that are excessive are placed in the unlinked map + if ((pindexFirstMissing == nullptr) && + (!chainContainsExcessive(pindex))) { + assert(!foundInUnlinked); // We aren't missing data for any parent + // -- cannot be in mapBlocksUnlinked. } if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr &&