diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -33,7 +33,7 @@ sys.path.append("qa/pull-tester/") from tests_config import * -BOLD = ("","") +BOLD = ("", "") if os.name == 'posix': # primitive formatting on supported # terminal via ANSI escape sequences: @@ -41,19 +41,19 @@ RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/' -#If imported values are not defined then set to zero (or disabled) +# If imported values are not defined then set to zero (or disabled) if 'ENABLE_WALLET' not in vars(): - ENABLE_WALLET=0 + ENABLE_WALLET = 0 if 'ENABLE_BITCOIND' not in vars(): - ENABLE_BITCOIND=0 + ENABLE_BITCOIND = 0 if 'ENABLE_UTILS' not in vars(): - ENABLE_UTILS=0 + ENABLE_UTILS = 0 if 'ENABLE_ZMQ' not in vars(): - ENABLE_ZMQ=0 + ENABLE_ZMQ = 0 -ENABLE_COVERAGE=0 +ENABLE_COVERAGE = 0 -#Create a set to store arguments and create the passon string +# Create a set to store arguments and create the passon string opts = set() passon_args = [] PASSON_REGEX = re.compile("^--") @@ -75,21 +75,24 @@ else: opts.add(arg) -#Set env vars +# Set env vars if "BITCOIND" not in os.environ: os.environ["BITCOIND"] = BUILDDIR + '/src/bitcoind' + EXEEXT if EXEEXT == ".exe" and "-win" not in opts: # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 - print("Win tests currently disabled by default. Use -win option to enable") + print( + "Win tests currently disabled by default. Use -win option to enable") sys.exit(0) if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1): - print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled") + print( + "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled") sys.exit(0) -# python3-zmq may not be installed. Handle this gracefully and with some helpful info +# python3-zmq may not be installed. 
Handle this gracefully and with some +# helpful info if ENABLE_ZMQ: try: import zmq @@ -157,6 +160,7 @@ 'abc-p2p-activation.py', 'abc-p2p-fullblocktest.py', 'abc-rpc.py', + 'abc-ec.py', 'mempool-accept-txn.py', ] if ENABLE_ZMQ: @@ -222,12 +226,13 @@ # Populate cache subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags) - #Run Tests + # Run Tests max_len_name = len(max(test_list, key=len)) time_sum = 0 time0 = time.time() job_queue = RPCTestHandler(run_parallel, test_list, flags) - results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0] + results = BOLD[1] + "%s | %s | %s\n\n" % ( + "TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0] all_passed = True for _ in range(len(test_list)): (name, stdout, stderr, passed, duration) = job_queue.get_next() @@ -237,9 +242,12 @@ print('\n' + BOLD[1] + name + BOLD[0] + ":") print('' if passed else stdout + '\n', end='') print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='') - results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration) - print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration)) - results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0] + results += "%s | %s | %s s\n" % ( + name.ljust(max_len_name), str(passed).ljust(6), duration) + print("Pass: %s%s%s, Duration: %s s\n" % + (BOLD[1], passed, BOLD[0], duration)) + results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ( + "ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0] print(results) print("\nRuntime: %s s" % (int(time.time() - time0))) @@ -253,6 +261,7 @@ class RPCTestHandler: + """ Trigger the test scripts passed in via the list. 
""" @@ -274,12 +283,15 @@ # Add tests self.num_running += 1 t = self.test_list.pop(0) - port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)] + port_seed = ["--portseed={}".format( + len(self.test_list) + self.portseed_offset)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) self.jobs.append((t, time.time(), - subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed, + subprocess.Popen( + (RPC_TESTS_DIR + t).split() + + self.flags + port_seed, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), @@ -294,7 +306,8 @@ (name, time0, proc, log_out, log_err) = j if proc.poll() is not None: log_out.seek(0), log_err.seek(0) - [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)] + [stdout, stderr] = [l.read().decode('utf-8') + for l in (log_out, log_err)] log_out.close(), log_err.close() passed = stderr == "" and proc.returncode == 0 self.num_running -= 1 @@ -304,6 +317,7 @@ class RPCCoverage(object): + """ Coverage reporting utilities for pull-tester. @@ -318,6 +332,7 @@ See also: qa/rpc-tests/test_framework/coverage.py """ + def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir=%s' % self.dir diff --git a/qa/rpc-tests/abc-ec.py b/qa/rpc-tests/abc-ec.py new file mode 100755 --- /dev/null +++ b/qa/rpc-tests/abc-ec.py @@ -0,0 +1,393 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2017 The Bitcoin developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +This test checks the new consensus behavior. +It is derived from the much more complex p2p-fullblocktest. 
+""" + +# TODO: remove unnecessary code from the fullblocktest + +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import * +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.blocktools import * +import time +from test_framework.key import CECKey +from test_framework.script import * +from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, + MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT) + +import unittest + +# far into the past +UAHF_START_TIME = 30000000 + + +class PreviousSpendableOutput(object): + + def __init__(self, tx=CTransaction(), n=-1): + self.tx = tx + self.n = n # the output we're spending + +# TestNode: A peer we use to send messages to bitcoind, and store responses. + + +class TestNode(SingleNodeConnCB): + + def __init__(self): + self.last_sendcmpct = None + self.last_cmpctblock = None + self.last_getheaders = None + self.last_headers = None + SingleNodeConnCB.__init__(self) + + def on_sendcmpct(self, conn, message): + self.last_sendcmpct = message + + def on_cmpctblock(self, conn, message): + self.last_cmpctblock = message + self.last_cmpctblock.header_and_shortids.header.calc_sha256() + + def on_getheaders(self, conn, message): + self.last_getheaders = message + + def on_headers(self, conn, message): + self.last_headers = message + for x in self.last_headers.headers: + x.calc_sha256() + + def clear_block_data(self): + with mininode_lock: + self.last_sendcmpct = None + self.last_cmpctblock = None + + +class FullBlockTest(ComparisonTestFramework): + + # Can either run this test as 1 node with expected answers, or two and compare them. + # Change the "outcome" variable from each TestInstance object to only do + # the comparison. 
+ + def __init__(self): + super().__init__() + self.excessive_block_size = 8 * ONE_MEGABYTE + self.num_nodes = 4 + self.block_heights = {} + self.coinbase_key = CECKey() + self.coinbase_key.set_secretbytes(b"fatstacks") + self.coinbase_pubkey = self.coinbase_key.get_pubkey() + self.tip = None + self.blocks = {} + + def sync_all(self): + if self.is_network_split: + sync_blocks(self.nodes[:2], timeout=15) + sync_blocks(self.nodes[2:], timeout=15) + sync_mempools(self.nodes[:2]) + sync_mempools(self.nodes[2:]) + else: + sync_blocks(self.nodes) + sync_mempools(self.nodes) + + def setup_network(self): + self.excessive_block_size_2 = 16 * ONE_MEGABYTE + self.is_network_split = True + self.array_opts_node_0 = ['-debug', + '-norelaypriority', + '-whitelist=127.0.0.1', + '-limitancestorcount=9999', + '-limitancestorsize=9999', + '-limitdescendantcount=9999', + '-limitdescendantsize=9999', + '-maxmempool=999', + "-uahfstarttime=%d" % UAHF_START_TIME, + "-excessiveblocksize=%d" + % self.excessive_block_size] + self.array_opts = ['-debug', + '-norelaypriority', + '-whitelist=127.0.0.1', + '-limitancestorcount=9999', + '-limitancestorsize=9999', + '-limitdescendantcount=9999', + '-limitdescendantsize=9999', + '-maxmempool=999', + "-uahfstarttime=%d" % UAHF_START_TIME, + "-excessiveblocksize=%d" + % self.excessive_block_size] + + self.extra_args = [ + self.array_opts_node_0, self.array_opts_node_0, self.array_opts, self.array_opts] + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, + self.extra_args, + binary=[self.options.testbinary, self.options.testbinary, self.options.testbinary, self.options.testbinary]) + + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 2, 3) + self.sync_all() + + def add_options(self, parser): + super().add_options(parser) + parser.add_option( + "--runbarelyexpensive", dest="runbarelyexpensive", default=True) + + def run_test(self): + self.test = TestManager(self, self.options.tmpdir) + 
self.test.add_all_connections(self.nodes) + # Start up network handling in another thread + NetworkThread().start() + # Set the blocksize to 16MB as initial condition + self.nodes[0].setexcessiveblock(self.excessive_block_size_2) + self.test.run() + + def add_transactions_to_block(self, block, tx_list): + [tx.rehash() for tx in tx_list] + block.vtx.extend(tx_list) + + # this is a little handier to use than the version in blocktools.py + def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = create_transaction(spend_tx, n, b"", value, script) + return tx + + # sign a transaction, using the key we know about + # this signs input 0 in tx, which is assumed to be spending output n in + # spend_tx + def sign_tx(self, tx, spend_tx, n): + scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend + tx.vin[0].scriptSig = CScript() + return + sighash = SignatureHashForkId( + spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue) + tx.vin[0].scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) + + def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = self.create_tx(spend_tx, n, value, script) + self.sign_tx(tx, spend_tx, n) + tx.rehash() + return tx + + def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True, submit=True, base_hash=None, base_time=None, base_height=None): + """ + Create a block on top of self.tip, and advance self.tip to point to the new block + if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend + output, and rest will go to fees. 
+ """ + if self.tip == None: + base_block_hash = self.genesis_hash + block_time = int(time.time()) + 1 + else: + if base_hash == None and base_time == None: + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + else: + base_block_hash = base_hash + block_time = base_time + # First create the coinbase + if base_height == None: + height = self.block_heights[base_block_hash] + 1 + else: + height = base_height + coinbase = create_coinbase(height, self.coinbase_pubkey) + coinbase.vout[0].nValue += additional_coinbase_value + if (spend != None): + coinbase.vout[0].nValue += spend.tx.vout[ + spend.n].nValue - 1 # all but one satoshi to fees + coinbase.rehash() + block = create_block(base_block_hash, coinbase, block_time) + spendable_output = None + if (spend != None): + tx = CTransaction() + # no signature yet + tx.vin.append( + CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) + # We put some random data into the first transaction of the chain + # to randomize ids + tx.vout.append( + CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE]))) + if script == None: + tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) + else: + tx.vout.append(CTxOut(1, script)) + spendable_output = PreviousSpendableOutput(tx, 0) + + # Now sign it if necessary + scriptSig = b"" + scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend + scriptSig = CScript([OP_TRUE]) + else: + # We have to actually sign it + sighash = SignatureHashForkId( + spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue) + scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) + tx.vin[0].scriptSig = scriptSig + # Now add the transaction to the block + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + if spendable_output != None and block_size > 0: + while 
len(block.serialize()) < block_size: + tx = CTransaction() + script_length = block_size - len(block.serialize()) - 79 + if script_length > 510000: + script_length = 500000 + tx_sigops = min( + extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) + extra_sigops -= tx_sigops + script_pad_len = script_length - tx_sigops + script_output = CScript( + [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) + tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append( + CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) + spendable_output = PreviousSpendableOutput(tx, 0) + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + # Make sure the math above worked out to produce the correct block size + # (the math will fail if there are too many transactions in the block) + assert_equal(len(block.serialize()), block_size) + # Make sure all the requested sigops have been included + assert_equal(extra_sigops, 0) + if solve: + block.solve() + if submit: + self.tip = block + self.block_heights[block.sha256] = height + assert number not in self.blocks + self.blocks[number] = block + return block + + def get_tests(self): + self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) + self.block_heights[self.genesis_hash] = 0 + spendable_outputs = [] + + # save the current tip so it can be spent by a later block + def save_spendable_output(): + spendable_outputs.append(self.tip) + + # get an output that we previously marked as spendable + def get_spendable_output(): + return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) + + # returns a test case that asserts that the current tip was accepted + def accepted(): + return TestInstance([[self.tip, True]]) + + # returns a test case that asserts that the current tip was rejected + def rejected(reject=None): + if reject is None: + return TestInstance([[self.tip, False]]) + else: + return TestInstance([[self.tip, reject]]) + + 
# move the tip back to a previous block + def tip(number): + self.tip = self.blocks[number] + + # adds transactions to the block and updates state + def update_block(block_number, new_transactions): + block = self.blocks[block_number] + self.add_transactions_to_block(block, new_transactions) + old_sha256 = block.sha256 + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + # Update the internal state just like in next_block + self.tip = block + if block.sha256 != old_sha256: + self.block_heights[ + block.sha256] = self.block_heights[old_sha256] + del self.block_heights[old_sha256] + self.blocks[block_number] = block + return block + + # shorthand for functions + block = self.next_block + + # Create a new block + block(0) + save_spendable_output() + yield accepted() + + # Now we need that block to mature so we can spend the coinbase. + test = TestInstance(sync_every_block=False) + for i in range(99): + block(5000 + i) + test.blocks_and_transactions.append([self.tip, True]) + save_spendable_output() + yield test + + # collect spendable outputs now to avoid cluttering the code later on + out = [] + for i in range(100): + out.append(get_spendable_output()) + + # Let's build some blocks and test them. 
+ for i in range(8): + n = i + 1 + block(n, spend=out[i], block_size=n * ONE_MEGABYTE) + yield accepted() + + # block of maximal size + block(9, spend=out[8], block_size=self.excessive_block_size) + yield accepted() + + # Consensus code, the network is split: + # Node 0 and 1 are connected + # Node 2 and 3 are connected + assert_equal(self.nodes[0].getblockcount(), 109) + assert_equal(self.nodes[1].getblockcount(), 109) + assert_equal(self.nodes[2].getblockcount(), 109) + assert_equal(self.nodes[3].getblockcount(), 109) + + # Node 2 and 3 will continue mining the chain + new_blocks = self.nodes[2].generate(40) + self.sync_all() + assert_equal(self.nodes[0].getblockcount(), 109) + assert_equal(self.nodes[1].getblockcount(), 109) + assert_equal(self.nodes[2].getblockcount(), 149) + assert_equal(self.nodes[3].getblockcount(), 149) + node_2_chain_hash = self.nodes[2].getbestblockhash() + + # Node 0 will follow a chain that has bigger blocks than the + # excessive_block_size (This is possible because of the ec-policy) + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + block_height = self.block_heights[base_block_hash] + 1 + block_temp = block( + 10, spend=out[9], block_size=self.excessive_block_size + 100, solve=False, submit=False, base_hash=base_block_hash, base_time=block_time, base_height=block_height) + block_temp.solve() + self.nodes[0].submitblock(ToHex(block_temp)) + # The block was submitted + assert_equal(self.nodes[0].getblockcount(), 110) + + self.nodes[0].generate(2) + assert_equal(self.nodes[0].getblockcount(), 112) + big_blocks_hash = self.nodes[0].getbestblockhash() + node_1_last_hash = self.nodes[1].getbestblockhash() + assert_equal(self.nodes[1].getblockcount(), 109) + + # Node 0 and 1 cannot sync because they have different + # excessive_block_size + try: + self.sync_all() + except AssertionError: + assert (True) + assert_equal(self.nodes[0].getbestblockhash(), big_blocks_hash) + assert_equal(self.nodes[1].getbestblockhash(), 
node_1_last_hash) + assert_equal(self.nodes[1].getblockcount(), 109) + + self.nodes[0].generate(4) + big_blocks_hash = self.nodes[0].getbestblockhash() + self.sync_all() + assert_equal(self.nodes[0].getbestblockhash(), big_blocks_hash) + assert_equal(self.nodes[1].getbestblockhash(), big_blocks_hash) + + +if __name__ == '__main__': + FullBlockTest().main() diff --git a/src/Makefile.am b/src/Makefile.am --- a/src/Makefile.am +++ b/src/Makefile.am @@ -119,6 +119,8 @@ noui.h \ policy/fees.h \ policy/policy.h \ + policy/ec/blocksizepolicyinterface.h \ + policy/ec/ecblocksize.h \ pow.h \ protocol.h \ random.h \ @@ -199,6 +201,7 @@ noui.cpp \ policy/fees.cpp \ policy/policy.cpp \ + policy/ec/ecblocksize.cpp \ pow.cpp \ rest.cpp \ rpc/abc.cpp \ diff --git a/src/chain.h b/src/chain.h --- a/src/chain.h +++ b/src/chain.h @@ -145,7 +145,6 @@ //!< undo data available in rev*.dat BLOCK_HAVE_UNDO = 16, BLOCK_HAVE_MASK = BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO, - //!< stage after last reached validness failed BLOCK_FAILED_VALID = 32, //!< descends from failed block @@ -199,6 +198,9 @@ //! necessary; won't happen before 2030 unsigned int nChainTx; + //! (memory only) blocksize + unsigned int nBlockSize; + //! Verification status of this block. 
See enum BlockStatus unsigned int nStatus; @@ -227,6 +229,7 @@ nChainWork = arith_uint256(); nTx = 0; nChainTx = 0; + nBlockSize = 0; nStatus = 0; nSequenceId = 0; nTimeMax = 0; diff --git a/src/config.h b/src/config.h --- a/src/config.h +++ b/src/config.h @@ -5,8 +5,9 @@ #ifndef BITCOIN_CONFIG_H #define BITCOIN_CONFIG_H +#include "chain.h" +#include "policy/ec/blocksizepolicyinterface.h" #include - #include class CChainParams; @@ -18,15 +19,30 @@ virtual bool SetUAHFStartTime(int64_t uahfStartTime) = 0; virtual int64_t GetUAHFStartTime() const = 0; virtual const CChainParams &GetChainParams() const = 0; + + virtual bool IsBlockValid(const CBlock &block) const = 0; + virtual bool UpdateConsensus( + const std::pair &notYetValidChain, + const std::pair &validChain) const = 0; }; class GlobalConfig final : public Config { public: + GlobalConfig(); + bool SetMaxBlockSize(uint64_t maxBlockSize); uint64_t GetMaxBlockSize() const; bool SetUAHFStartTime(int64_t uahfStartTime); int64_t GetUAHFStartTime() const; const CChainParams &GetChainParams() const; + + bool IsBlockValid(const CBlock &block) const; + bool UpdateConsensus( + const std::pair &notYetValidChain, + const std::pair &validChain) const; + +private: + std::unique_ptr blockPolicy; }; // Temporary woraround. 
diff --git a/src/config.cpp b/src/config.cpp --- a/src/config.cpp +++ b/src/config.cpp @@ -6,6 +6,29 @@ #include "chainparams.h" #include "consensus/consensus.h" #include "globals.h" +#include "policy/ec/ecblocksize.h" + +#include "validation.h" + +GlobalConfig::GlobalConfig() + : blockPolicy(new ECBlockSize(DEFAULT_MAX_BLOCK_SIZE)) {} + +bool GlobalConfig::IsBlockValid(const CBlock &block) const { + if (blockPolicy != nullptr) { + return blockPolicy->isBlockValid(block); + } + return false; +} + +bool GlobalConfig::UpdateConsensus( + const std::pair &notYetValidChain, + const std::pair &validChain) const { + if (blockPolicy != nullptr) { + return blockPolicy->UpdateConsensus(notYetValidChain, validChain); + } + + return false; +}; bool GlobalConfig::SetMaxBlockSize(uint64_t maxBlockSize) { // Do not allow maxBlockSize to be set below historic 1MB limit @@ -13,12 +36,19 @@ if (maxBlockSize <= LEGACY_MAX_BLOCK_SIZE) { return false; } + if (blockPolicy != nullptr) { + blockPolicy->SetMaxBlockSize(maxBlockSize); + } nMaxBlockSize = maxBlockSize; return true; } uint64_t GlobalConfig::GetMaxBlockSize() const { + // assert(blockSizePolicy); + if (blockPolicy != nullptr) { + return blockPolicy->GetMaxBlockSize(); + } return nMaxBlockSize; } diff --git a/src/policy/ec/blocksizepolicyinterface.h b/src/policy/ec/blocksizepolicyinterface.h new file mode 100644 --- /dev/null +++ b/src/policy/ec/blocksizepolicyinterface.h @@ -0,0 +1,25 @@ +// Copyright (c) 2017 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +#ifndef BITCOIN_BLOCKSIZEPOLICYINTERFACE_H +#define BITCOIN_BLOCKSIZEPOLICYINTERFACE_H + +#include "chain.h" +#include + +class BlockSizePolicyInterface { +public: + // Hard limit for maximum block size. 
+ virtual uint64_t GetMaxBlockSize() const = 0; + virtual void SetMaxBlockSize(uint64_t maxBlockSize) = 0; + virtual void updateConsensus() = 0; + + virtual bool isBlockValid(const CBlock &block) const = 0; + virtual bool UpdateConsensus( + const std::pair &notYetValidChain, + const std::pair &validChain) const = 0; + + ~BlockSizePolicyInterface(){}; +}; + +#endif \ No newline at end of file diff --git a/src/policy/ec/ecblocksize.h b/src/policy/ec/ecblocksize.h new file mode 100644 --- /dev/null +++ b/src/policy/ec/ecblocksize.h @@ -0,0 +1,28 @@ +// Copyright (c) 2017 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +#ifndef BITCOIN_ECBLOCKSIZE_H +#define BITCOIN_ECBLOCKSIZE_H + +#include "policy/ec/blocksizepolicyinterface.h" + +class ECBlockSize : public BlockSizePolicyInterface { +public: + ECBlockSize(uint64_t maxBlockSize); + + uint64_t GetMaxBlockSize() const override; + void SetMaxBlockSize(uint64_t maxBlockSize) override; + void updateConsensus() override; + + bool isBlockValid(const CBlock &block) const; + bool UpdateConsensus( + const std::pair &notYetValidChain, + const std::pair &validChain) const; + + ~ECBlockSize(); + +private: + mutable uint64_t nMaxBlockSize; +}; + +#endif \ No newline at end of file diff --git a/src/policy/ec/ecblocksize.cpp b/src/policy/ec/ecblocksize.cpp new file mode 100644 --- /dev/null +++ b/src/policy/ec/ecblocksize.cpp @@ -0,0 +1,49 @@ +// Copyright (c) 2017 The Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+#include "policy/ec/ecblocksize.h" +#include "consensus/consensus.h" +#include "version.h" +#include +#include + +ECBlockSize::~ECBlockSize() {} + +ECBlockSize::ECBlockSize(uint64_t maxBlockSize) : nMaxBlockSize(maxBlockSize) {} + +uint64_t ECBlockSize::GetMaxBlockSize() const { + return nMaxBlockSize; +} + +void ECBlockSize::SetMaxBlockSize(uint64_t maxBlockSize) { + nMaxBlockSize = maxBlockSize; +} + +void ECBlockSize::updateConsensus() { + auto temp = nMaxBlockSize; + nMaxBlockSize = nMaxBlockSize * 2; + std::cout << "maxblocksize was modified from " << temp << " to " + << nMaxBlockSize << std::endl; + // nMaxBlockSize = nMaxBlockSize; +} + +bool ECBlockSize::isBlockValid(const CBlock &block) const { + uint64_t size = + (int)::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); + if (size > nMaxBlockSize) { + return false; + } else { + return true; + } +} + +bool ECBlockSize::UpdateConsensus( + const std::pair &notYetValidChain, + const std::pair &validChain) const { + if (notYetValidChain.second->nChainWork > validChain.second->nChainWork) { + // TODO: update max size + return true; + } else { + return false; + } +} \ No newline at end of file diff --git a/src/validation.cpp b/src/validation.cpp --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2238,8 +2238,8 @@ nLastSetChain = nNow; } } catch (const std::runtime_error &e) { - return AbortNode(state, std::string("System error while flushing: ") + - e.what()); + return AbortNode( + state, std::string("System error while flushing: ") + e.what()); } return true; } @@ -2502,11 +2502,39 @@ return true; } +CBlockIndex * +SelectBestChain(const std::pair &notYetValidChain, + const std::pair &validChain, + const Config &config) { + + if (config.UpdateConsensus(notYetValidChain, validChain)) { + // UpdateConsensus returns true if the consensus is updated to accept the + // not yet valid chain + return notYetValidChain.first; + } else { + // Remove not valid yet elements from the indexes + CBlockIndex *temp_start = 
notYetValidChain.first; + CBlockIndex *temp_end = notYetValidChain.second; + while (temp_end != temp_start) { + mapBlocksUnlinked.insert(std::make_pair(temp_end->pprev, temp_end)); + setBlockIndexCandidates.erase(temp_end); + temp_end = temp_end->pprev; + } + setBlockIndexCandidates.erase(temp_end); + // Return the valid chain + return validChain.first; + } +} + /** * Return the tip of the chain with the most work in it, that isn't known to be * invalid (it's however far from certain to be valid). */ -static CBlockIndex *FindMostWorkChain() { +static CBlockIndex *FindMostWorkChain(const Config &config) { + // Variables to save the not valid yet chain + std::pair notValidYetChain; + bool fDiscoveredANotValidYetChain = false; + do { CBlockIndex *pindexNew = nullptr; @@ -2515,6 +2543,27 @@ std::set::reverse_iterator it = setBlockIndexCandidates.rbegin(); if (it == setBlockIndexCandidates.rend()) return nullptr; + + if (fDiscoveredANotValidYetChain) { + // Ignore not valid yet chain in case we have one + // TODO: fix the bug with this iterator + CBlockIndex *nvy_start = notValidYetChain.first; + CBlockIndex *nvt_end = notValidYetChain.second; + while ((nvt_end != nvy_start) && + (it != setBlockIndexCandidates.rend())) { + if (*it == nvt_end) { + it++; + } + nvt_end = nvt_end->pprev; + } + if (*it == nvt_end) { + it++; + } + if (it == setBlockIndexCandidates.rend()) + return notValidYetChain.first; + } + + // if (it == setBlockIndexCandidates.rend()) return nullptr; pindexNew = *it; } @@ -2523,6 +2572,7 @@ // is an optimization, as we know all blocks in it are valid already. CBlockIndex *pindexTest = pindexNew; bool fInvalidAncestor = false; + bool fCurrentChainIsNotValidYet = false; while (pindexTest && !chainActive.Contains(pindexTest)) { assert(pindexTest->nChainTx || pindexTest->nHeight == 0); @@ -2532,6 +2582,25 @@ // to a chain unless we have all the non-active-chain parent blocks. 
bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); + bool fNotValidYet = false; + + if (!fMissingData && !fFailedChain) { + // If the index is ok, read the block and verify with the policy + // if it is alright + CBlock block; + if (ReadBlockFromDisk(block, pindexTest, + Params().GetConsensus())) { + fNotValidYet = !config.IsBlockValid(block); + } + } + + if (fNotValidYet && fDiscoveredANotValidYetChain) { + // If there is already one chain with a not valid yet block, + // the other not valid yet chains should be discarded. (Only + // the one with most work should be evaluated) + fMissingData = true; + } + if (fFailedChain || fMissingData) { // Candidate chain is not usable (either invalid or missing // data) @@ -2559,9 +2628,28 @@ fInvalidAncestor = true; break; } + if (fNotValidYet) { + // The current chain has a not valid yet element + fCurrentChainIsNotValidYet = true; + } pindexTest = pindexTest->pprev; } - if (!fInvalidAncestor) return pindexNew; + if (!fInvalidAncestor) { + if (fCurrentChainIsNotValidYet && !fDiscoveredANotValidYetChain) { + // Save not valid yet chain and prepare to compare it to a + // legacy chain + fDiscoveredANotValidYetChain = true; + notValidYetChain = + std::make_pair(std::move(pindexNew), std::move(pindexTest)); + } else if (fDiscoveredANotValidYetChain) + // Compare chains + return SelectBestChain(notValidYetChain, + std::make_pair(pindexNew, pindexTest), + config); + else + // The best chain is a legacy one + return pindexNew; + } } while (true); } @@ -2730,7 +2818,7 @@ MemPoolConflictRemovalTracker mrt(mempool); CBlockIndex *pindexOldTip = chainActive.Tip(); if (pindexMostWork == nullptr) { - pindexMostWork = FindMostWorkChain(); + pindexMostWork = FindMostWorkChain(config); } // Whether we have anything to do at all. @@ -3164,20 +3252,21 @@ } // Size limits. 
- auto nMaxBlockSize = config.GetMaxBlockSize(); + // auto nMaxBlockSize = config.GetMaxBlockSize(); - // Bail early if there is no way this block is of reasonable size. - if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { - return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, - "size limits failed"); - } + // // Bail early if there is no way this block is of reasonable size. + // if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { + // return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, + // "size limits failed"); + // } auto currentBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); - if (currentBlockSize > nMaxBlockSize) { - return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, - "size limits failed"); - } + + // if (currentBlockSize > nMaxBlockSize) { + // return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, + // "size limits failed"); + // } // And a valid coinbase. if (!CheckCoinbase(*block.vtx[0], state, false)) { @@ -4178,10 +4267,11 @@ boost::this_thread::interruption_point(); uiInterface.ShowProgress( _("Verifying blocks..."), - std::max( - 1, std::min(99, 100 - (int)(((double)(chainActive.Height() - - pindex->nHeight)) / - (double)nCheckDepth * 50)))); + std::max(1, + std::min(99, + 100 - (int)(((double)(chainActive.Height() - + pindex->nHeight)) / + (double)nCheckDepth * 50)))); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) { @@ -4664,6 +4754,7 @@ // is valid and we have all data for its parents, it must be in // setBlockIndexCandidates. chainActive.Tip() must also be there // even if some data has been pruned. + if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) { assert(setBlockIndexCandidates.count(pindex)); @@ -4709,6 +4800,7 @@ // mapBlocksUnlinked. 
assert(!foundInUnlinked); } + if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {