diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -33,7 +33,7 @@
 sys.path.append("qa/pull-tester/")
 from tests_config import *
 
-BOLD = ("","")
+BOLD = ("", "")
 if os.name == 'posix':
     # primitive formatting on supported
     # terminal via ANSI escape sequences:
@@ -41,19 +41,19 @@
 
 RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
 
-#If imported values are not defined then set to zero (or disabled)
+# If imported values are not defined then set to zero (or disabled)
 if 'ENABLE_WALLET' not in vars():
-    ENABLE_WALLET=0
+    ENABLE_WALLET = 0
 if 'ENABLE_BITCOIND' not in vars():
-    ENABLE_BITCOIND=0
+    ENABLE_BITCOIND = 0
 if 'ENABLE_UTILS' not in vars():
-    ENABLE_UTILS=0
+    ENABLE_UTILS = 0
 if 'ENABLE_ZMQ' not in vars():
-    ENABLE_ZMQ=0
+    ENABLE_ZMQ = 0
 
-ENABLE_COVERAGE=0
+ENABLE_COVERAGE = 0
 
-#Create a set to store arguments and create the passon string
+# Create a set to store arguments and create the passon string
 opts = set()
 passon_args = []
 PASSON_REGEX = re.compile("^--")
@@ -75,21 +75,24 @@
     else:
         opts.add(arg)
 
-#Set env vars
+# Set env vars
 if "BITCOIND" not in os.environ:
     os.environ["BITCOIND"] = BUILDDIR + '/src/bitcoind' + EXEEXT
 
 if EXEEXT == ".exe" and "-win" not in opts:
     # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
     # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
-    print("Win tests currently disabled by default. Use -win option to enable")
+    print(
+        "Win tests currently disabled by default. Use -win option to enable")
     sys.exit(0)
 
 if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
-    print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
+    print(
+        "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
    sys.exit(0)
 
-# python3-zmq may not be installed. Handle this gracefully and with some helpful info
+# python3-zmq may not be installed. Handle this gracefully and with some
+# helpful info
 if ENABLE_ZMQ:
     try:
         import zmq
@@ -156,6 +159,7 @@
     'abc-cmdline.py',
     'abc-p2p-fullblocktest.py',
     'abc-rpc.py',
+    'abc-ec.py',
     'mempool-accept-txn.py',
 ]
 if ENABLE_ZMQ:
@@ -221,12 +225,13 @@
         # Populate cache
         subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
 
-    #Run Tests
+    # Run Tests
     max_len_name = len(max(test_list, key=len))
     time_sum = 0
     time0 = time.time()
     job_queue = RPCTestHandler(run_parallel, test_list, flags)
-    results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
+    results = BOLD[1] + "%s | %s | %s\n\n" % (
+        "TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
     all_passed = True
     for _ in range(len(test_list)):
         (name, stdout, stderr, passed, duration) = job_queue.get_next()
@@ -236,9 +241,12 @@
         print('\n' + BOLD[1] + name + BOLD[0] + ":")
         print('' if passed else stdout + '\n', end='')
         print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
-        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
-        print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
-    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
+        results += "%s | %s | %s s\n" % (
+            name.ljust(max_len_name), str(passed).ljust(6), duration)
+        print("Pass: %s%s%s, Duration: %s s\n" %
+              (BOLD[1], passed, BOLD[0], duration))
+    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % (
+        "ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
 
     print(results)
     print("\nRuntime: %s s" % (int(time.time() - time0)))
@@ -252,6 +260,7 @@
 
 
 class RPCTestHandler:
+
     """
     Trigger the testscrips passed in via the list.
     """
@@ -273,12 +282,15 @@
             # Add tests
             self.num_running += 1
             t = self.test_list.pop(0)
-            port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
+            port_seed = ["--portseed={}".format(
+                len(self.test_list) + self.portseed_offset)]
             log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
             log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
             self.jobs.append((t,
                               time.time(),
-                              subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
+                              subprocess.Popen(
+                                  (RPC_TESTS_DIR + t).split() +
+                                  self.flags + port_seed,
                                                universal_newlines=True,
                                                stdout=log_stdout,
                                                stderr=log_stderr),
@@ -293,7 +305,8 @@
                 (name, time0, proc, log_out, log_err) = j
                 if proc.poll() is not None:
                     log_out.seek(0), log_err.seek(0)
-                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
+                    [stdout, stderr] = [l.read().decode('utf-8')
+                                        for l in (log_out, log_err)]
                     log_out.close(), log_err.close()
                     passed = stderr == "" and proc.returncode == 0
                     self.num_running -= 1
@@ -303,6 +316,7 @@
 
 
 class RPCCoverage(object):
+
     """
     Coverage reporting utilities for pull-tester.
 
@@ -317,6 +331,7 @@
     See also: qa/rpc-tests/test_framework/coverage.py
 
     """
+
     def __init__(self):
         self.dir = tempfile.mkdtemp(prefix="coverage")
         self.flag = '--coveragedir=%s' % self.dir
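The only functional change in rpc-tests.py above is registering abc-ec.py in the default test list; the remaining hunks are autopep8-style rewrapping. For quick iteration the new script can also be launched directly, the same way RPCTestHandler spawns each test. A minimal sketch, assuming the repository root as the working directory and the framework's existing --srcdir option (both assumptions on my part, not part of this patch):

# Hypothetical one-off launcher for the new EC test (illustrative only).
import subprocess

subprocess.check_call(["qa/rpc-tests/abc-ec.py", "--srcdir=src"])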
diff --git a/qa/rpc-tests/abc-ec.py b/qa/rpc-tests/abc-ec.py
new file mode 100755
--- /dev/null
+++ b/qa/rpc-tests/abc-ec.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2017 The Bitcoin developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+This test checks the new consensus behavior.
+It is derived from the much more complex p2p-fullblocktest.
+"""
+
+# TODO: remove unnecessary code from the fullblocktest
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.comptool import TestManager, TestInstance, RejectResult
+from test_framework.blocktools import *
+import time
+from test_framework.key import CECKey
+from test_framework.script import *
+from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE,
+                                  MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT)
+
+import unittest
+
+# far into the past
+UAHF_START_TIME = 30000000
+
+
+class PreviousSpendableOutput(object):
+
+    def __init__(self, tx=CTransaction(), n=-1):
+        self.tx = tx
+        self.n = n  # the output we're spending
+
+# TestNode: A peer we use to send messages to bitcoind, and store responses.
+
+
+class TestNode(SingleNodeConnCB):
+
+    def __init__(self):
+        self.last_sendcmpct = None
+        self.last_cmpctblock = None
+        self.last_getheaders = None
+        self.last_headers = None
+        SingleNodeConnCB.__init__(self)
+
+    def on_sendcmpct(self, conn, message):
+        self.last_sendcmpct = message
+
+    def on_cmpctblock(self, conn, message):
+        self.last_cmpctblock = message
+        self.last_cmpctblock.header_and_shortids.header.calc_sha256()
+
+    def on_getheaders(self, conn, message):
+        self.last_getheaders = message
+
+    def on_headers(self, conn, message):
+        self.last_headers = message
+        for x in self.last_headers.headers:
+            x.calc_sha256()
+
+    def clear_block_data(self):
+        with mininode_lock:
+            self.last_sendcmpct = None
+            self.last_cmpctblock = None
+
+
+class FullBlockTest(ComparisonTestFramework):
+
+    # Can either run this test as 1 node with expected answers, or two and compare them.
+    # Change the "outcome" variable from each TestInstance object to only do
+    # the comparison.
+
+    def __init__(self):
+        super().__init__()
+        self.excessive_block_size = 8 * ONE_MEGABYTE
+        self.num_nodes = 4
+        self.block_heights = {}
+        self.coinbase_key = CECKey()
+        self.coinbase_key.set_secretbytes(b"fatstacks")
+        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
+        self.tip = None
+        self.blocks = {}
+
+    def sync_all(self):
+        if self.is_network_split:
+            sync_blocks(self.nodes[:2], timeout=15)
+            sync_blocks(self.nodes[2:], timeout=15)
+            sync_mempools(self.nodes[:2])
+            sync_mempools(self.nodes[2:])
+        else:
+            sync_blocks(self.nodes)
+            sync_mempools(self.nodes)
+
+    def setup_network(self):
+        self.excessive_block_size_2 = 16 * ONE_MEGABYTE
+        self.is_network_split = True
+        self.array_opts_node_0 = ['-debug',
+                                  '-norelaypriority',
+                                  '-whitelist=127.0.0.1',
+                                  '-limitancestorcount=9999',
+                                  '-limitancestorsize=9999',
+                                  '-limitdescendantcount=9999',
+                                  '-limitdescendantsize=9999',
+                                  '-maxmempool=999',
+                                  "-uahfstarttime=%d" % UAHF_START_TIME,
+                                  "-excessiveblocksize=%d"
+                                  % self.excessive_block_size]
+        self.array_opts = ['-debug',
+                           '-norelaypriority',
+                           '-whitelist=127.0.0.1',
+                           '-limitancestorcount=9999',
+                           '-limitancestorsize=9999',
+                           '-limitdescendantcount=9999',
+                           '-limitdescendantsize=9999',
+                           '-maxmempool=999',
+                           "-uahfstarttime=%d" % UAHF_START_TIME,
+                           "-excessiveblocksize=%d"
+                           % self.excessive_block_size]
+
+        self.extra_args = [
+            self.array_opts_node_0, self.array_opts_node_0, self.array_opts, self.array_opts]
+        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+                                 self.extra_args,
+                                 binary=[self.options.testbinary, self.options.testbinary, self.options.testbinary, self.options.testbinary])
+
+        connect_nodes_bi(self.nodes, 0, 1)
+        connect_nodes_bi(self.nodes, 2, 3)
+        self.sync_all()
+
+    def add_options(self, parser):
+        super().add_options(parser)
+        parser.add_option(
+            "--runbarelyexpensive", dest="runbarelyexpensive", default=True)
+
+    def run_test(self):
+        self.test = TestManager(self, self.options.tmpdir)
+        self.test.add_all_connections(self.nodes)
+        # Start up network handling in another thread
+        NetworkThread().start()
+        # Set the blocksize to 16MB as initial condition
+        self.nodes[0].setexcessiveblock(self.excessive_block_size_2)
+        self.test.run()
+
+    def add_transactions_to_block(self, block, tx_list):
+        [tx.rehash() for tx in tx_list]
+        block.vtx.extend(tx_list)
+
+    # this is a little handier to use than the version in blocktools.py
+    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+        tx = create_transaction(spend_tx, n, b"", value, script)
+        return tx
+
+    # sign a transaction, using the key we know about
+    # this signs input 0 in tx, which is assumed to be spending output n in
+    # spend_tx
+    def sign_tx(self, tx, spend_tx, n):
+        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
+        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
+            tx.vin[0].scriptSig = CScript()
+            return
+        sighash = SignatureHashForkId(
+            spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue)
+        tx.vin[0].scriptSig = CScript(
+            [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
+
+    def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+        tx = self.create_tx(spend_tx, n, value, script)
+        self.sign_tx(tx, spend_tx, n)
+        tx.rehash()
+        return tx
+
+    def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True, submit=True, base_hash=None, base_time=None, base_height=None):
+        """
+        Create a block on top of self.tip, and advance self.tip to point to
+        the new block.
+        If spend is specified, then 1 satoshi will be spent from that output
+        to an anyone-can-spend output, and the rest will go to fees.
+        """
+        if self.tip == None:
+            base_block_hash = self.genesis_hash
+            block_time = int(time.time()) + 1
+        else:
+            if base_hash == None and base_time == None:
+                base_block_hash = self.tip.sha256
+                block_time = self.tip.nTime + 1
+            else:
+                base_block_hash = base_hash
+                block_time = base_time
+        # First create the coinbase
+        if base_height == None:
+            height = self.block_heights[base_block_hash] + 1
+        else:
+            height = base_height
+        coinbase = create_coinbase(height, self.coinbase_pubkey)
+        coinbase.vout[0].nValue += additional_coinbase_value
+        if (spend != None):
+            coinbase.vout[0].nValue += spend.tx.vout[
+                spend.n].nValue - 1  # all but one satoshi to fees
+        coinbase.rehash()
+        block = create_block(base_block_hash, coinbase, block_time)
+        spendable_output = None
+        if (spend != None):
+            tx = CTransaction()
+            # no signature yet
+            tx.vin.append(
+                CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))
+            # We put some random data into the first transaction of the chain
+            # to randomize ids
+            tx.vout.append(
+                CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE])))
+            if script == None:
+                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
+            else:
+                tx.vout.append(CTxOut(1, script))
+            spendable_output = PreviousSpendableOutput(tx, 0)
+
+            # Now sign it if necessary
+            scriptSig = b""
+            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
+            if (scriptPubKey[0] == OP_TRUE):  # looks like an anyone-can-spend
+                scriptSig = CScript([OP_TRUE])
+            else:
+                # We have to actually sign it
+                sighash = SignatureHashForkId(
+                    spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue)
+                scriptSig = CScript(
+                    [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
+            tx.vin[0].scriptSig = scriptSig
+            # Now add the transaction to the block
+            self.add_transactions_to_block(block, [tx])
+        block.hashMerkleRoot = block.calc_merkle_root()
+        if spendable_output != None and block_size > 0:
+            while len(block.serialize()) < block_size:
+                tx = CTransaction()
+                script_length = block_size - len(block.serialize()) - 79
+                if script_length > 510000:
+                    script_length = 500000
+                tx_sigops = min(
+                    extra_sigops, script_length, MAX_TX_SIGOPS_COUNT)
+                extra_sigops -= tx_sigops
+                script_pad_len = script_length - tx_sigops
+                script_output = CScript(
+                    [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops)
+                tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
+                tx.vout.append(CTxOut(0, script_output))
+                tx.vin.append(
+                    CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n)))
+                spendable_output = PreviousSpendableOutput(tx, 0)
+                self.add_transactions_to_block(block, [tx])
+            block.hashMerkleRoot = block.calc_merkle_root()
+            # Make sure the math above worked out to produce the correct block size
+            # (the math will fail if there are too many transactions in the block)
+            assert_equal(len(block.serialize()), block_size)
+            # Make sure all the requested sigops have been included
+            assert_equal(extra_sigops, 0)
+        if solve:
+            block.solve()
+        if submit:
+            self.tip = block
+            self.block_heights[block.sha256] = height
+            assert number not in self.blocks
+            self.blocks[number] = block
+        return block
+
+    def get_tests(self):
+        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
+        self.block_heights[self.genesis_hash] = 0
+        spendable_outputs = []
+
+        # save the current tip so it can be spent by a later block
+        def save_spendable_output():
+            spendable_outputs.append(self.tip)
+
+        # get an output that we previously marked as spendable
+        def get_spendable_output():
+            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
+
+        # returns a test case that asserts that the current tip was accepted
+        def accepted():
+            return TestInstance([[self.tip, True]])
+
+        # returns a test case that asserts that the current tip was rejected
+        def rejected(reject=None):
+            if reject is None:
+                return TestInstance([[self.tip, False]])
+            else:
+                return TestInstance([[self.tip, reject]])
+
+        # move the tip back to a previous block
+        def tip(number):
+            self.tip = self.blocks[number]
+
+        # adds transactions to the block and updates state
+        def update_block(block_number, new_transactions):
+            block = self.blocks[block_number]
+            self.add_transactions_to_block(block, new_transactions)
+            old_sha256 = block.sha256
+            block.hashMerkleRoot = block.calc_merkle_root()
+            block.solve()
+            # Update the internal state just like in next_block
+            self.tip = block
+            if block.sha256 != old_sha256:
+                self.block_heights[
+                    block.sha256] = self.block_heights[old_sha256]
+                del self.block_heights[old_sha256]
+            self.blocks[block_number] = block
+            return block
+
+        # shorthand for functions
+        block = self.next_block
+
+        # Create a new block
+        block(0)
+        save_spendable_output()
+        yield accepted()
+
+        # Now we need that block to mature so we can spend the coinbase.
+        test = TestInstance(sync_every_block=False)
+        for i in range(99):
+            block(5000 + i)
+            test.blocks_and_transactions.append([self.tip, True])
+            save_spendable_output()
+        yield test
+
+        # collect spendable outputs now to avoid cluttering the code later on
+        out = []
+        for i in range(100):
+            out.append(get_spendable_output())
+
+        # Let's build some blocks and test them.
+        for i in range(8):
+            n = i + 1
+            block(n, spend=out[i], block_size=n * ONE_MEGABYTE)
+            yield accepted()
+
+        # block of maximal size
+        block(9, spend=out[8], block_size=self.excessive_block_size)
+        yield accepted()
+
+        # Consensus code, the network is split:
+        # Nodes 0 and 1 are connected
+        # Nodes 2 and 3 are connected
+        assert_equal(self.nodes[0].getblockcount(), 109)
+        assert_equal(self.nodes[1].getblockcount(), 109)
+        assert_equal(self.nodes[2].getblockcount(), 109)
+        assert_equal(self.nodes[3].getblockcount(), 109)
+
+        # Nodes 2 and 3 will continue mining the chain
+        new_blocks = self.nodes[2].generate(40)
+        self.sync_all()
+        assert_equal(self.nodes[0].getblockcount(), 109)
+        assert_equal(self.nodes[1].getblockcount(), 109)
+        assert_equal(self.nodes[2].getblockcount(), 149)
+        assert_equal(self.nodes[3].getblockcount(), 149)
+        node_2_chain_hash = self.nodes[2].getbestblockhash()
+
+        # Node 0 will follow a chain that has bigger blocks than the
+        # excessive_block_size (this is possible because of the EC policy)
+        base_block_hash = self.tip.sha256
+        block_time = self.tip.nTime + 1
+        block_height = self.block_heights[base_block_hash] + 1
+        block_temp = block(
+            10, spend=out[9], block_size=self.excessive_block_size + 100, solve=False, submit=False, base_hash=base_block_hash, base_time=block_time, base_height=block_height)
+        block_temp.solve()
+        self.nodes[0].submitblock(ToHex(block_temp))
+        # The block was submitted
+        assert_equal(self.nodes[0].getblockcount(), 110)
+
+        self.nodes[0].generate(1)
+        assert_equal(self.nodes[0].getblockcount(), 111)
+        big_blocks_hash = self.nodes[0].getbestblockhash()
+        node_1_last_hash = self.nodes[1].getbestblockhash()
+        assert_equal(self.nodes[1].getblockcount(), 109)
+
+        # Nodes 0 and 1 cannot sync because they have different
+        # excessive_block_size settings
+        try:
+            self.sync_all()
+        except AssertionError:
+            assert (True)
+        assert_equal(self.nodes[0].getbestblockhash(), big_blocks_hash)
+        assert_equal(self.nodes[1].getbestblockhash(), node_1_last_hash)
+        assert_equal(self.nodes[1].getblockcount(), 109)
+
+        self.nodes[0].generate(4)
+        big_blocks_hash = self.nodes[0].getbestblockhash()
+        self.sync_all()
+        assert_equal(self.nodes[0].getbestblockhash(), big_blocks_hash)
+        assert_equal(self.nodes[1].getbestblockhash(), big_blocks_hash)
+
+
+if __name__ == '__main__':
+    FullBlockTest().main()
diff --git a/src/config.h b/src/config.h
--- a/src/config.h
+++ b/src/config.h
@@ -5,8 +5,8 @@
 #ifndef BITCOIN_CONFIG_H
 #define BITCOIN_CONFIG_H
 
+#include "chain.h"
 #include <boost/noncopyable.hpp>
-
 #include <cstdint>
 
 class CChainParams;
@@ -18,15 +18,29 @@
     virtual bool SetUAHFStartTime(int64_t uahfStartTime) = 0;
     virtual int64_t GetUAHFStartTime() const = 0;
     virtual const CChainParams &GetChainParams() const = 0;
+
+    virtual bool
+    CheckBlockWithCurrentConsensus(const CBlockIndex *blockIndex) const = 0;
+    virtual CBlockIndex *SelectBestChain(
+        const std::pair<CBlockIndex *, CBlockIndex *> &notYetValidChain,
+        const std::pair<CBlockIndex *, CBlockIndex *> &validChain) const = 0;
 };
 
 class GlobalConfig final : public Config {
 public:
+    GlobalConfig();
+
     bool SetMaxBlockSize(uint64_t maxBlockSize);
     uint64_t GetMaxBlockSize() const;
     bool SetUAHFStartTime(int64_t uahfStartTime);
     int64_t GetUAHFStartTime() const;
     const CChainParams &GetChainParams() const;
+
+    // EC Functions
+    bool CheckBlockWithCurrentConsensus(const CBlockIndex *blockIndex) const;
+    CBlockIndex *SelectBestChain(
+        const std::pair<CBlockIndex *, CBlockIndex *> &notYetValidChain,
+        const std::pair<CBlockIndex *, CBlockIndex *> &validChain) const;
 };
 
 // Temporary woraround.
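The two hooks added to the Config interface above split the EC behaviour into a per-block policy question and a chain-arbitration question: CheckBlockWithCurrentConsensus asks whether a stored block still fits the node's current, adjustable limits, and SelectBestChain arbitrates between the best not-yet-valid chain and the best valid one. A minimal sketch of the size check, the only policy this patch actually implements; the constant below is my stand-in, not the real MIN_TRANSACTION_SIZE value:

# Illustrative model of CheckBlockWithCurrentConsensus (not part of the patch).
MIN_TX_SIZE = 10  # placeholder for MIN_TRANSACTION_SIZE


def check_block_with_current_consensus(serialized_size, num_txs, max_block_size):
    # Bail early if there is no way this block is of reasonable size.
    if num_txs * MIN_TX_SIZE > max_block_size:
        return False
    # Otherwise only the serialized size against the current limit matters.
    return serialized_size <= max_block_size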
diff --git a/src/config.cpp b/src/config.cpp
--- a/src/config.cpp
+++ b/src/config.cpp
@@ -7,6 +7,10 @@
 
 #include "consensus/consensus.h"
 #include "globals.h"
+#include "validation.h"
+
+GlobalConfig::GlobalConfig() {}
+
 bool GlobalConfig::SetMaxBlockSize(uint64_t maxBlockSize) {
     // Do not allow maxBlockSize to be set below historic 1MB limit
     // It cannot be equal either because of the "must be big" UAHF rule.
@@ -31,6 +35,44 @@
     return nUAHFStartTime;
 }
 
+// EC Functions
+bool GlobalConfig::CheckBlockWithCurrentConsensus(
+    const CBlockIndex *blockIndex) const {
+    // Policy checks: the checks that were removed from the CheckBlock
+    // function must be validated here.
+    // TODO: move the rest of the CheckBlock checks here
+    CBlock block;
+    if (!ReadBlockFromDisk(block, blockIndex, Params().GetConsensus())) {
+        return false;
+    }
+
+    // Size limits.
+    auto nMaxBlockSize = GetMaxBlockSize();
+    // Bail early if there is no way this block is of reasonable size.
+    if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
+        return false;
+    }
+    auto currentBlockSize =
+        ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
+    if (currentBlockSize > nMaxBlockSize) {
+        return false;
+    }
+    return true;
+}
+
+CBlockIndex *GlobalConfig::SelectBestChain(
+    const std::pair<CBlockIndex *, CBlockIndex *> &notYetValidChain,
+    const std::pair<CBlockIndex *, CBlockIndex *> &validChain) const {
+    // The first element is the tip of the chain and the second one is the
+    // last element before joining the current chain.
+    // TODO: make the selection of the best chain something more complex
+    if (notYetValidChain.first->nChainWork > validChain.first->nChainWork + 4) {
+        return notYetValidChain.first;
+    } else {
+        return validChain.first;
+    }
+}
+
 const CChainParams &GlobalConfig::GetChainParams() const {
     return Params();
 }
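Before reading the validation.cpp changes below, it helps to see the shape of the new FindMostWorkChain loop in isolation. A rough Python model, assuming the candidates are already sorted by descending chain work and ignoring the missing-data and failed-ancestor handling that the real code keeps:

# Simplified model of the EC-aware FindMostWorkChain (illustrative only).
def find_most_work_chain(candidates, passes_current_consensus,
                         select_best_chain, active_tip):
    """candidates: chain tips in descending chain-work order."""
    not_yet_valid = None
    for tip in candidates:
        if passes_current_consensus(tip):
            if not_yet_valid is None:
                # The best candidate is already valid under current limits.
                return tip
            # Both a not-yet-valid and a valid chain exist: let the config decide.
            return select_best_chain(not_yet_valid, tip)
        if not_yet_valid is None:
            # Remember only the best not-yet-valid chain and keep scanning.
            not_yet_valid = tip
    if not_yet_valid is not None:
        # No valid candidate at all: compare against the active tip.
        return select_best_chain(not_yet_valid, active_tip)
    return None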
diff --git a/src/validation.cpp b/src/validation.cpp
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -2213,8 +2213,8 @@
             nLastSetChain = nNow;
         }
     } catch (const std::runtime_error &e) {
-        return AbortNode(state, std::string("System error while flushing: ") +
-                                    e.what());
+        return AbortNode(
+            state, std::string("System error while flushing: ") + e.what());
     }
     return true;
 }
@@ -2481,15 +2481,43 @@
  * Return the tip of the chain with the most work in it, that isn't known to be
  * invalid (it's however far from certain to be valid).
  */
-static CBlockIndex *FindMostWorkChain() {
+/**
+ * EC: If a possible valid chain is found, this will compare it to the best
+ * valid chain and evaluate whether it should allow it.
+ */
+static CBlockIndex *FindMostWorkChain(const Config &config) {
+    // Variables to save the not-yet-valid chain
+    std::pair<CBlockIndex *, CBlockIndex *> notValidYetChain;
+    bool fFoundECChain = false;
+    int iterator_offset = 0;
     do {
         CBlockIndex *pindexNew = nullptr;
 
-        // Find the best candidate header.
         {
             std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
                 it = setBlockIndexCandidates.rbegin();
-            if (it == setBlockIndexCandidates.rend()) return nullptr;
+
+            if (fFoundECChain) {
+                // The best chain is the not-yet-valid chain, so the iterator
+                // will look for a valid chain to compare against. If there is
+                // no valid chain, it will compare with the active chain tip.
+                int i = 0;
+                while (i < iterator_offset) {
+                    it++;
+                    i++;
+                    // If the other chains have missing data or bad blocks,
+                    // their blocks are removed.
+                    if (it == setBlockIndexCandidates.rend()) {
+                        return config.SelectBestChain(
+                            notValidYetChain,
+                            std::make_pair(chainActive.Tip(),
+                                           chainActive.Tip()));
+                    }
+                }
+            } else {
+                if (it == setBlockIndexCandidates.rend()) return nullptr;
+            }
+
             pindexNew = *it;
         }
 
@@ -2498,6 +2526,7 @@
         // is an optimization, as we know all blocks in it are valid already.
         CBlockIndex *pindexTest = pindexNew;
         bool fInvalidAncestor = false;
+        bool fCurrentChainIsEC = false;
         while (pindexTest && !chainActive.Contains(pindexTest)) {
             assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
 
@@ -2507,6 +2536,26 @@
             // to a chain unless we have all the non-active-chain parent blocks.
             bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
             bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
+
+            // The index status is ok. Now the block is validated using the
+            // consensus rules.
+            if (!fMissingData && !fFailedChain &&
+                !config.CheckBlockWithCurrentConsensus(pindexTest)) {
+                fCurrentChainIsEC = true;
+                if (fFoundECChain) {
+                    // If there is already one chain with a not-yet-valid
+                    // block, the other not-yet-valid chains are discarded
+                    // because only the one with the most work should be
+                    // evaluated. (Subchains of the best one are also chains;
+                    // that's why these items are not being removed.)
+                    ++iterator_offset;
+                    // Setting fInvalidAncestor to true prevents this chain
+                    // from being returned.
+                    fInvalidAncestor = true;
+                    break;
+                }
+            }
+
             if (fFailedChain || fMissingData) {
                 // Candidate chain is not usable (either invalid or missing
                 // data)
@@ -2536,7 +2585,26 @@
             }
             pindexTest = pindexTest->pprev;
         }
-        if (!fInvalidAncestor) return pindexNew;
+
+        if (!fInvalidAncestor) {
+            if (fCurrentChainIsEC && !fFoundECChain) {
+                // The current chain is a not-yet-valid chain. Save it to
+                // compare later against a valid chain.
+                fFoundECChain = true;
+                notValidYetChain =
+                    std::make_pair(std::move(pindexNew), std::move(pindexTest));
+                // Set the iterator to the next element to look for a valid
+                // chain.
+                ++iterator_offset;
+            } else if (fFoundECChain) {
+                // There is both a not-yet-valid chain and a valid chain.
+                return config.SelectBestChain(
+                    notValidYetChain, std::make_pair(pindexNew, pindexTest));
+            } else {
+                // The best chain is a valid one.
+                return pindexNew;
+            }
+        }
     } while (true);
 }
 
@@ -2705,7 +2773,7 @@
         MemPoolConflictRemovalTracker mrt(mempool);
         CBlockIndex *pindexOldTip = chainActive.Tip();
         if (pindexMostWork == nullptr) {
-            pindexMostWork = FindMostWorkChain();
+            pindexMostWork = FindMostWorkChain(config);
         }
 
         // Whether we have anything to do at all.
@@ -3139,20 +3207,21 @@
     }
 
     // Size limits.
-    auto nMaxBlockSize = config.GetMaxBlockSize();
+    // auto nMaxBlockSize = config.GetMaxBlockSize();
 
-    // Bail early if there is no way this block is of reasonable size.
-    if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
-        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
-                         "size limits failed");
-    }
+    // // Bail early if there is no way this block is of reasonable size.
+    // if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
+    //     return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
+    //                      "size limits failed");
+    // }
 
     auto currentBlockSize =
         ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
-    if (currentBlockSize > nMaxBlockSize) {
-        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
-                         "size limits failed");
-    }
+
+    // if (currentBlockSize > nMaxBlockSize) {
+    //     return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
+    //                      "size limits failed");
+    // }
 
     // And a valid coinbase.
     if (!CheckCoinbase(*block.vtx[0], state, false)) {
@@ -4153,10 +4222,11 @@
         boost::this_thread::interruption_point();
         uiInterface.ShowProgress(
             _("Verifying blocks..."),
-            std::max(
-                1, std::min(99, 100 - (int)(((double)(chainActive.Height() -
-                                              pindex->nHeight)) /
-                                             (double)nCheckDepth * 50))));
+            std::max(1,
+                     std::min(99,
+                              100 - (int)(((double)(chainActive.Height() -
+                                                     pindex->nHeight)) /
+                                          (double)nCheckDepth * 50))));
         pindex = chainActive.Next(pindex);
         CBlock block;
         if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) {
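The abc-ec.py scenario earlier ties directly to the hard-coded margin in GlobalConfig::SelectBestChain: node 1 rejects node 0's oversized block, keeps its own tip while node 0's lead is small, and only reorganizes once the not-yet-valid chain is far enough ahead in work. A toy reproduction of that decision, using made-up work values rather than real regtest chain work:

# Toy model of node 1's reorg decision (numbers are illustrative).
WORK_MARGIN = 4  # mirrors the "+ 4" in GlobalConfig::SelectBestChain


def node1_tip(valid_tip_work, not_yet_valid_tip_work):
    if not_yet_valid_tip_work > valid_tip_work + WORK_MARGIN:
        return "excessive chain"
    return "own chain"


# While node 0 is only slightly ahead, node 1 stays on its own chain:
assert node1_tip(valid_tip_work=100, not_yet_valid_tip_work=103) == "own chain"
# Once node 0 has extended its lead past the margin, node 1 follows it:
assert node1_tip(valid_tip_work=100, not_yet_valid_tip_work=110) == "excessive chain"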