diff --git a/src/config.h b/src/config.h --- a/src/config.h +++ b/src/config.h @@ -64,4 +64,7 @@ // Temporary woraround. const Config &GetConfig(); +// TODO (EBP): a non-const reference is needed to update the blocksize +Config &GetConfigTemp(); + #endif diff --git a/src/config.cpp b/src/config.cpp --- a/src/config.cpp +++ b/src/config.cpp @@ -47,6 +47,11 @@ return gConfig; } +// TODO (EBP): a non-const reference is needed to update the blocksize +Config &GetConfigTemp() { + return gConfig; +} + void GlobalConfig::SetCashAddrEncoding(bool c) { useCashAddr = c; } diff --git a/src/validation.cpp b/src/validation.cpp --- a/src/validation.cpp +++ b/src/validation.cpp @@ -153,6 +153,10 @@ /** chainwork for the last block that preciousblock has been applied to. */ arith_uint256 nLastPreciousChainwork = 0; +/** Extensible Blockchain Protocol. */ +std::map<CBlockIndex *, CBlock> EBPBlockMap; +uint64_t max_size_ebp_block = 0; + /** Dirty block index entries. */ std::set<CBlockIndex *> setDirtyBlockIndex; @@ -2355,8 +2359,8 @@ nLastSetChain = nNow; } } catch (const std::runtime_error &e) { - return AbortNode( - state, std::string("System error while flushing: ") + e.what()); + return AbortNode(state, std::string("System error while flushing: ") + + e.what()); } return true; } @@ -3348,7 +3352,10 @@ } // Size limits. - auto nMaxBlockSize = config.GetMaxBlockSize(); + + // TODO: set a max_block_size to avoid attacks + // Temporary solution, reject blocks bigger than 64MBs + auto nMaxBlockSize = 64 * ONE_MEGABYTE; // Bail early if there is no way this block is of reasonable size. if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { @@ -3372,6 +3379,18 @@ state.GetDebugMessage())); } + if (fCheckPOW && fCheckMerkleRoot) { + block.fChecked = true; + } + + return true; +} + +bool CheckBlockTransactions(const Config &config, const CBlock &block, + CValidationState &state) { + + auto currentBlockSize = + ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); // Keep track of the sigops count. 
uint64_t nSigOps = 0; auto nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize); @@ -3410,10 +3429,6 @@ } } - if (fCheckPOW && fCheckMerkleRoot) { - block.fChecked = true; - } - return true; } @@ -3685,6 +3700,125 @@ return true; } +static std::pair<bool, uint64_t> CheckBlockIsEBP(const CBlock &block, + const Config &config) { + // Return true if the block is EBP and also returns its blocksize + + // EBP Rules: + // 1-Checkblocksize + auto nMaxBlockSize = config.GetMaxBlockSize(); + auto currentBlockSize = + ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); + if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { + return std::make_pair(true, currentBlockSize); + } + if (currentBlockSize > nMaxBlockSize) { + return std::make_pair(true, currentBlockSize); + } + + // TODO: start to ban the node with low values to avoid getting spammed with + // invalid blocks + // state.DoS(3, false, REJECT_INVALID, "bad-blk-length", false,"size limits + // failed"); + + return std::make_pair(false, currentBlockSize); +} + +static bool AddToEBPIndex(CBlockIndex *&pindex, const CBlock &block, + bool is_ebp) { + if (is_ebp) { + // The block is EBP so it is added to the index + EBPBlockMap.insert(std::make_pair(pindex, block)); + return true; + } else { + // Check if the block is a son of an EBP block + for (auto &i : EBPBlockMap) { + if (i.first == pindex->pprev) { + EBPBlockMap.insert(std::make_pair(pindex, block)); + return true; + } + } + } + return false; +} + +static bool SaveBlockToDisk(CBlockIndex *&pindex, const CBlock &block, + const CDiskBlockPos *dbp, CValidationState &state, + const CChainParams &chainparams) { + int nHeight = pindex->nHeight; + // Write block to history file + try { + unsigned int nBlockSize = + ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); + CDiskBlockPos blockPos; + if (dbp != nullptr) { + blockPos = *dbp; + } + if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight, + block.GetBlockTime(), dbp != nullptr)) { + return error("AcceptBlock(): 
FindBlockPos failed"); } + if (dbp == nullptr) { + if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { + return AbortNode(state, "Failed to write block"); + } + } + if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { + return error("AcceptBlock(): ReceivedBlockTransactions failed"); + } + } catch (const std::runtime_error &e) { + return AbortNode(state, std::string("System error: ") + e.what()); + } + + return true; +} + +static uint64_t GetNewConsensusBlockSize(const Config &config, + const uint64_t ebp_block_size) { + // Duplicate the current blocksize until it is bigger than the + // ebp_block_size + uint64_t next_block_size = config.GetMaxBlockSize(); + while (next_block_size < ebp_block_size) { + next_block_size = next_block_size * 2; + } + return next_block_size; +} + +static bool FindAllEBPBlocks(CBlockIndex *&last_block, + std::list<CBlockIndex *> &blocks_to_store) { + // Check if all the blocks on the EBP chain are ready to be stored and + // reorganize + + auto pindexFork = chainActive.FindFork(last_block); + auto temp = last_block; + + while (temp->pprev != pindexFork) { + // TODO: pindexFork may not be in EBPBlockMap if the fork started before + // the first EBP block + // It also needs to check if the block is already stored in the database + if (EBPBlockMap.find(temp) != EBPBlockMap.end()) { + blocks_to_store.push_front(temp); + + } else { + // Check if the starting blocks of the fork were not EBP + auto const candidate = setBlockIndexCandidates.find(temp); + if (candidate == setBlockIndexCandidates.end()) { + // TODO: in case of pruned nodes, the block may not have data + // the temp pindex may need to be checked on mapBlocksUnlinked, + // and verify that the block is valid (BLOCK_FAILED_MASK) and + // doesn't have data (BLOCK_HAVE_DATA) + // Does not exist + return false; + } else if ((*candidate)->nStatus & BLOCK_FAILED_MASK) { + // Failed + return false; + } + } + temp = temp->pprev; + } + return true; +} + /** * Store block on 
disk. If dbp is non-null, the file is known to already reside * on disk. @@ -3757,9 +3891,10 @@ *fNewBlock = true; } - if (!CheckBlock(config, block, state) || - !ContextualCheckBlock(config, block, state, pindex->pprev)) { + if (!CheckBlock(config, block, state)) { if (state.IsInvalid() && !state.CorruptionPossible()) { + // TODO: check what happens when an EBP block arrives in a wrong + // order (w/o parent already stored) pindex->nStatus |= BLOCK_FAILED_VALID; setDirtyBlockIndex.insert(pindex); } @@ -3767,43 +3902,130 @@ block.GetHash().ToString()); } - // Header is valid/has work, merkle tree and segwit merkle tree are - // good...RELAY NOW (but if it does not build on our best tip, let the - // SendMessages loop relay it) - if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev) { - GetMainSignals().NewPoWValidBlock(pindex, pblock); - } + // Check if the block is EBP or if it's a block included in an EBP chain + const auto ebp = CheckBlockIsEBP(block, config); + if (AddToEBPIndex(pindex, block, ebp.first)) { + // Save the size of the biggest block on the EBP chain + if (ebp.second > max_size_ebp_block) { + max_size_ebp_block = ebp.second; + } + + // Check if the EBP chain is at least 12 blocks bigger than the current + // active chain (using PoW) + auto diff = chainActive.Tip()->nChainWork - + chainActive.Tip()->pprev->nChainWork; + if (pindex->nChainWork >= (chainActive.Tip()->nChainWork + 12 * diff)) { + std::list<CBlockIndex *> blocks_to_store; + if (FindAllEBPBlocks(pindex, blocks_to_store)) { + // Push the first block, the one that its previous == pindexFork + blocks_to_store.push_front(pindex); + + // Complete the EBP blocks validation + bool ebp_valid = true; + for (auto block_index : blocks_to_store) { + if (!ebp_valid) { + // If a block is not valid, all of its children are + // also invalid. 
+ block_index->nStatus |= BLOCK_FAILED_VALID; + setDirtyBlockIndex.insert(block_index); + EBPBlockMap.erase(block_index); + } else { + // Validate the EBP block + auto temp_block = EBPBlockMap.find(block_index); + if (temp_block != EBPBlockMap.end()) { + if (!CheckBlockTransactions( + config, temp_block->second, state) || + !ContextualCheckBlock(config, + temp_block->second, state, + pindex->pprev)) { + if (state.IsInvalid() && + !state.CorruptionPossible()) { + block_index->nStatus |= BLOCK_FAILED_VALID; + setDirtyBlockIndex.insert(block_index); + EBPBlockMap.erase(block_index); + } + ebp_valid = false; + } + } else { + // The block no longer exists (but it was found by + // FindAllEBPBlocks) it should never enter here + // TODO: remove everything from the EBPBlockMap (?) + return false; + } + } + } + if (!ebp_valid) { + // At least one of the EBP blocks is invalid, do not store + // the chain + return false; + } - int nHeight = pindex->nHeight; - const CChainParams &chainparams = config.GetChainParams(); + // EBP chain is valid, update the consensus and reorganize + // TODO: this new_max_size must be saved to disk + const auto new_max_size = + GetNewConsensusBlockSize(config, max_size_ebp_block); + GetConfigTemp().SetMaxBlockSize(new_max_size); + + // Save to disk the EBP chain + for (auto block_index : blocks_to_store) { + auto temp_block = EBPBlockMap.find(block_index); + if (temp_block != EBPBlockMap.end()) { + if (!SaveBlockToDisk(block_index, temp_block->second, + dbp, state, + config.GetChainParams())) { + return false; + } + } + EBPBlockMap.erase(block_index); + } - // Write block to history file - try { - unsigned int nBlockSize = - ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); - CDiskBlockPos blockPos; - if (dbp != nullptr) { - blockPos = *dbp; - } - if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight, - block.GetBlockTime(), dbp != nullptr)) { - return error("AcceptBlock(): FindBlockPos failed"); - } - if (dbp == nullptr) { - if 
(!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { - AbortNode(state, "Failed to write block"); + // Reorganization is complete + return true; + + } else { + // The chain is not complete, wait for the nodes to send the + // blocks + return false; } + } else { + // Not enough PoW to activate EBP + return state.DoS(1, false, REJECT_INVALID, "bad-blk-length", false, + "size limits failed"); } - if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { - return error("AcceptBlock(): ReceivedBlockTransactions failed"); - } - } catch (const std::runtime_error &e) { - return AbortNode(state, std::string("System error: ") + e.what()); - } - if (fCheckForPruning) { - // we just allocated more disk space for block files. - FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_NONE); + } else { + // The block doesn't belong to an EBP chain, continue the validation and + // store the block + if (!CheckBlockTransactions(config, block, state) || + !ContextualCheckBlock(config, block, state, pindex->pprev)) { + if (state.IsInvalid() && !state.CorruptionPossible()) { + pindex->nStatus |= BLOCK_FAILED_VALID; + setDirtyBlockIndex.insert(pindex); + } + return error("%s: %s (block %s)", __func__, + FormatStateMessage(state), block.GetHash().ToString()); + } else { + // Header is valid/has work, merkle tree and segwit merkle tree are + // good...RELAY NOW (but if it does not build on our best tip, let + // the + // SendMessages loop relay it) + if (!IsInitialBlockDownload() && + chainActive.Tip() == pindex->pprev) { + GetMainSignals().NewPoWValidBlock(pindex, pblock); + } + + // Write block to history file + if (!SaveBlockToDisk(pindex, block, dbp, state, + config.GetChainParams())) { + return false; + } + + if (fCheckForPruning) { + // we just allocated more disk space for block files. 
+ FlushStateToDisk(config.GetChainParams(), state, + FLUSH_STATE_NONE); + } + } } return true; @@ -4399,11 +4621,10 @@ boost::this_thread::interruption_point(); uiInterface.ShowProgress( _("Verifying blocks..."), - std::max(1, - std::min(99, - 100 - (int)(((double)(chainActive.Height() - - pindex->nHeight)) / - (double)nCheckDepth * 50)))); + std::max( + 1, std::min(99, 100 - (int)(((double)(chainActive.Height() - + pindex->nHeight)) / + (double)nCheckDepth * 50)))); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) { @@ -5315,4 +5536,4 @@ delete (*it1).second; mapBlockIndex.clear(); } -} instance_of_cmaincleanup; +} instance_of_cmaincleanup; \ No newline at end of file diff --git a/test/functional/abc-ebp.py b/test/functional/abc-ebp.py new file mode 100644 --- /dev/null +++ b/test/functional/abc-ebp.py @@ -0,0 +1,376 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2017 The Bitcoin developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +This test checks simple acceptance of bigger blocks via p2p. +It is derived from the much more complex p2p-fullblocktest. +The intention is that small tests can be derived from this one, or +this one can be extended, to cover the checks done for bigger blocks +(e.g. sigops limits). 
+""" + +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import * +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.blocktools import * +import time +from test_framework.key import CECKey +from test_framework.script import * +from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, + MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT) + + +class PreviousSpendableOutput(): + + def __init__(self, tx=CTransaction(), n=-1): + self.tx = tx + self.n = n # the output we're spending + + +# TestNode: A peer we use to send messages to bitcoind, and store responses. +class TestNode(NodeConnCB): + + def __init__(self): + self.last_sendcmpct = None + self.last_cmpctblock = None + self.last_getheaders = None + self.last_headers = None + super().__init__() + + def on_sendcmpct(self, conn, message): + self.last_sendcmpct = message + + def on_cmpctblock(self, conn, message): + self.last_cmpctblock = message + self.last_cmpctblock.header_and_shortids.header.calc_sha256() + + def on_getheaders(self, conn, message): + self.last_getheaders = message + + def on_headers(self, conn, message): + self.last_headers = message + for x in self.last_headers.headers: + x.calc_sha256() + + def clear_block_data(self): + with mininode_lock: + self.last_sendcmpct = None + self.last_cmpctblock = None + + +class EBPBlockTest(ComparisonTestFramework): + + # Can either run this test as 1 node with expected answers, or two and compare them. + # Change the "outcome" variable from each TestInstance object to only do + # the comparison. 
+ + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + self.block_heights = {} + self.coinbase_key = CECKey() + self.coinbase_key.set_secretbytes(b"fatstacks") + self.coinbase_pubkey = self.coinbase_key.get_pubkey() + self.tip = None + self.blocks = {} + self.excessive_block_size = 2 * ONE_MEGABYTE + self.extra_args = [['-norelaypriority', + '-whitelist=127.0.0.1', + '-limitancestorcount=9999', + '-limitancestorsize=9999', + '-limitdescendantcount=9999', + '-limitdescendantsize=9999', + '-maxmempool=999', + "-excessiveblocksize=%d" + % self.excessive_block_size], + ['-norelaypriority', + '-whitelist=127.0.0.1', + '-limitancestorcount=9999', + '-limitancestorsize=9999', + '-limitdescendantcount=9999', + '-limitdescendantsize=9999', + '-maxmempool=999', + "-excessiveblocksize=%d" + % self.excessive_block_size] + ] + + def add_options(self, parser): + super().add_options(parser) + parser.add_option( + "--runbarelyexpensive", dest="runbarelyexpensive", default=True) + + def run_test(self): + self.test = TestManager(self, self.options.tmpdir) + self.test.add_all_connections(self.nodes) + # Start up network handling in another thread + NetworkThread().start() + # Set the blocksize to 16MB as initial condition + self.nodes[0].setexcessiveblock(16 * ONE_MEGABYTE) + self.test.run() + + def add_transactions_to_block(self, block, tx_list): + [tx.rehash() for tx in tx_list] + block.vtx.extend(tx_list) + + # this is a little handier to use than the version in blocktools.py + def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = create_transaction(spend_tx, n, b"", value, script) + return tx + + # sign a transaction, using the key we know about + # this signs input 0 in tx, which is assumed to be spending output n in + # spend_tx + def sign_tx(self, tx, spend_tx, n): + scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend + tx.vin[0].scriptSig = CScript() + return + 
sighash = SignatureHashForkId( + spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue) + tx.vin[0].scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) + + def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = self.create_tx(spend_tx, n, value, script) + self.sign_tx(tx, spend_tx, n) + tx.rehash() + return tx + + def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True, submit=True, base_hash=None, base_time=None, base_height=None): + """ + Create a block on top of self.tip, and advance self.tip to point to the new block + if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend + output, and rest will go to fees. + """ + if self.tip == None: + base_block_hash = self.genesis_hash + block_time = int(time.time()) + 1 + else: + if base_hash == None and base_time == None: + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + else: + base_block_hash = base_hash + block_time = base_time + # First create the coinbase + if base_height == None: + height = self.block_heights[base_block_hash] + 1 + else: + height = base_height + coinbase = create_coinbase(height, self.coinbase_pubkey) + coinbase.vout[0].nValue += additional_coinbase_value + if (spend != None): + coinbase.vout[0].nValue += spend.tx.vout[ + spend.n].nValue - 1 # all but one satoshi to fees + coinbase.rehash() + block = create_block(base_block_hash, coinbase, block_time) + spendable_output = None + if (spend != None): + tx = CTransaction() + # no signature yet + tx.vin.append( + CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) + # We put some random data into the first transaction of the chain + # to randomize ids + tx.vout.append( + CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE]))) + if script == None: + tx.vout.append(CTxOut(1, 
CScript([OP_TRUE]))) + else: + tx.vout.append(CTxOut(1, script)) + spendable_output = PreviousSpendableOutput(tx, 0) + + # Now sign it if necessary + scriptSig = b"" + scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend + scriptSig = CScript([OP_TRUE]) + else: + # We have to actually sign it + sighash = SignatureHashForkId( + spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue) + scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) + tx.vin[0].scriptSig = scriptSig + # Now add the transaction to the block + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + if spendable_output != None and block_size > 0: + while len(block.serialize()) < block_size: + tx = CTransaction() + script_length = block_size - len(block.serialize()) - 79 + if script_length > 510000: + script_length = 500000 + tx_sigops = min( + extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) + extra_sigops -= tx_sigops + script_pad_len = script_length - tx_sigops + script_output = CScript( + [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) + tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append( + CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) + spendable_output = PreviousSpendableOutput(tx, 0) + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + # Make sure the math above worked out to produce the correct block size + # (the math will fail if there are too many transactions in the block) + assert_equal(len(block.serialize()), block_size) + # Make sure all the requested sigops have been included + assert_equal(extra_sigops, 0) + if solve: + block.solve() + if submit: + self.tip = block + self.block_heights[block.sha256] = height + assert number not in 
self.blocks + self.blocks[number] = block + return block + + def get_tests(self): + self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) + self.block_heights[self.genesis_hash] = 0 + spendable_outputs = [] + + # save the current tip so it can be spent by a later block + def save_spendable_output(): + spendable_outputs.append(self.tip) + + # get an output that we previously marked as spendable + def get_spendable_output(): + return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) + + # returns a test case that asserts that the current tip was accepted + def accepted(): + return TestInstance([[self.tip, True]]) + + # returns a test case that asserts that the current tip was rejected + def rejected(reject=None): + if reject is None: + return TestInstance([[self.tip, False]]) + else: + return TestInstance([[self.tip, reject]]) + + # move the tip back to a previous block + def tip(number): + self.tip = self.blocks[number] + + # adds transactions to the block and updates state + def update_block(block_number, new_transactions): + block = self.blocks[block_number] + self.add_transactions_to_block(block, new_transactions) + old_sha256 = block.sha256 + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + # Update the internal state just like in next_block + self.tip = block + if block.sha256 != old_sha256: + self.block_heights[ + block.sha256] = self.block_heights[old_sha256] + del self.block_heights[old_sha256] + self.blocks[block_number] = block + return block + + # shorthand for functions + block = self.next_block + + connect_nodes_bi(self.nodes, 0, 1) + + # Create a new block + block(0) + save_spendable_output() + yield accepted() + + # Now we need that block to mature so we can spend the coinbase. 
+ test = TestInstance(sync_every_block=False) + for i in range(200): + block(5000 + i) + test.blocks_and_transactions.append([self.tip, True]) + save_spendable_output() + yield test + + # collect spendable outputs now to avoid cluttering the code later on + out = [] + for i in range(200): + out.append(get_spendable_output()) + + # Let's build some blocks and test them. + for i in range(16): + n = i + 1 + block(n, spend=out[i], block_size=2 * ONE_MEGABYTE) + yield accepted() + + # block of maximal size + block(17, spend=out[16], block_size=self.excessive_block_size) + yield accepted() + + # Save current tip variables + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + block_height = self.block_heights[base_block_hash] + 1 + + node_0_count = self.nodes[0].getblockcount() # 218 + node_1_count = self.nodes[1].getblockcount() # 218 + assert_equal( + self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) + + # 4MB blocks should only be accepted by node 0 + block_temp = block( + 18, spend=out[17], block_size=4 * ONE_MEGABYTE, solve=False, + submit=False, base_hash=base_block_hash, base_time=block_time, base_height=block_height) + block_temp.solve() + self.nodes[0].submitblock(ToHex(block_temp)) + # The block was submited + node_0_count = node_0_count + 1 + assert_equal(self.nodes[0].getblockcount(), node_0_count) + assert_equal(self.nodes[1].getblockcount(), node_1_count) + + # 10 More 4Mb blocks are created and submited + n = 19 + for i in range(10): + # TODO: refactor the creation of a new block + base_block_hash = block_temp.sha256 + block_time = block_temp.nTime + 1 + block_height = block_height + 1 + n = n + 1 + block_temp = block( + n, spend=out[n - 1], block_size=4 * ONE_MEGABYTE, solve=False, + submit=False, base_hash=base_block_hash, base_time=block_time, base_height=block_height) + block_temp.solve() + self.nodes[0].submitblock(ToHex(block_temp)) + node_0_count = node_0_count + 1 + assert_equal(self.nodes[0].getblockcount(), 
node_0_count) + assert_equal(self.nodes[1].getblockcount(), node_1_count) + + # Give the node time in case it needs to reorg + time.sleep(3) + assert_equal(self.nodes[0].getblockcount(), node_0_count) # 229 + assert_equal(self.nodes[1].getblockcount(), node_1_count) # 219 + + # The next block should activate EBP on node 1 + base_block_hash = block_temp.sha256 + block_time = block_temp.nTime + 1 + block_height = block_height + 1 + n = n + 1 + block_temp = block( + n, spend=out[n - 1], block_size=4 * ONE_MEGABYTE, solve=False, + submit=False, base_hash=base_block_hash, base_time=block_time, base_height=block_height) + block_temp.solve() + self.nodes[0].submitblock(ToHex(block_temp)) + + # Let the node activate EBP and reorg + time.sleep(3) + assert_equal( + self.nodes[1].getblockcount(), self.nodes[0].getblockcount()) # 230 == 230 + + # TODO: create an 8Mb chain and test if EBP also works with the new chain + # TODO: make the node, that will activate EBP, create a small chain to + # "fight" the one with bigger blocks (Make sure that when EBP activates + # it's because it had more PoW) + + +if __name__ == '__main__': + EBPBlockTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -117,6 +117,7 @@ 'p2p-leaktests.py', 'abc-cmdline.py', 'abc-p2p-fullblocktest.py', + 'abc-ebp.py', 'abc-rpc.py', 'abc-high_priority_transaction.py', 'abc-mempool-accept-txn.py', @@ -160,7 +161,8 @@ NON_SCRIPTS = [ - # These are python files that live in the functional tests directory, but are not test scripts. + # These are python files that live in the functional tests directory, but + # are not test scripts. 
"combine_logs.py", "create_cache.py", "test_runner.py", @@ -437,11 +439,13 @@ (self.tmpdir, re.sub(".py$", "", t), portseed)] self.jobs.append((t, time.time(), - subprocess.Popen([os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir, + subprocess.Popen( + [os.path.join(self.tests_dir, test_argv[0])] + test_argv[ + 1:] + self.flags + portseed_arg + tmpdir, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), - log_stdout, + log_stdout, log_stderr)) if not self.jobs: raise IndexError('pop from empty list') @@ -473,6 +477,7 @@ class TestResult(): + def __init__(self, name, status, time, stdout, stderr): self.name = name self.status = status @@ -508,11 +513,13 @@ print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % ( BOLD[1], BOLD[0], str(missed_tests))) if on_ci(): - # On CI this warning is an error to prevent merging incomplete commits into master + # On CI this warning is an error to prevent merging incomplete + # commits into master sys.exit(1) class RPCCoverage(): + """ Coverage reporting utilities for test_runner. @@ -618,7 +625,8 @@ class Timings(): - """ + + """ Takes care of loading, merging and saving tests execution times. """