diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -11,13 +11,16 @@
 import os
 
-from test_framework.blocktools import mine_big_block
+from test_framework.blocktools import create_coinbase
+from test_framework.messages import CBlock, ToHex
+from test_framework.script import CScript, OP_RETURN, OP_NOP
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal,
     assert_greater_than,
     assert_raises_rpc_error,
     connect_nodes,
+    disconnect_nodes,
     sync_blocks,
     wait_until,
 )
@@ -31,6 +34,50 @@
 TIMESTAMP_WINDOW = 2 * 60 * 60
 
 
+def mine_large_blocks(node, n):
+    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
+    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
+    # transaction but is consensus valid.
+
+    # Get the block parameters for the first block
+    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
+    best_block = node.getblock(node.getbestblockhash())
+    height = int(best_block["height"]) + 1
+    try:
+        # Static variable ensures that time is monotonically increasing and is
+        # therefore different for each block created => blockhash is unique.
+        mine_large_blocks.nTime = min(
+            mine_large_blocks.nTime, int(best_block["time"])) + 1
+    except AttributeError:
+        mine_large_blocks.nTime = int(best_block["time"]) + 1
+    previousblockhash = int(best_block["hash"], 16)
+
+    for _ in range(n):
+        # Build the coinbase transaction (with large scriptPubKey)
+        coinbase_tx = create_coinbase(height)
+        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
+        coinbase_tx.vout[0].scriptPubKey = big_script
+        coinbase_tx.rehash()
+
+        # Build the block
+        block = CBlock()
+        block.nVersion = best_block["version"]
+        block.hashPrevBlock = previousblockhash
+        block.nTime = mine_large_blocks.nTime
+        block.nBits = int('207fffff', 16)
+        block.nNonce = 0
+        block.vtx = [coinbase_tx]
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.solve()
+
+        # Submit to the node
+        node.submitblock(ToHex(block))
+
+        previousblockhash = block.sha256
+        height += 1
+        mine_large_blocks.nTime += 1
+
+
 def calc_usage(blockdir):
     return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir)
                if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
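
Note: mine_large_blocks() stores its last-used timestamp as an attribute on the function object itself, a Python idiom for static state that survives across calls. A minimal sketch of just that idiom (the call_counter name is illustrative, not part of the patch):

    def call_counter():
        # Function attributes persist between calls, like a C static variable.
        try:
            call_counter.count += 1
        except AttributeError:
            # First call: the attribute does not exist yet, so initialize it.
            call_counter.count = 1
        return call_counter.count

    assert [call_counter() for _ in range(3)] == [1, 2, 3]

The patch uses the same try/except AttributeError shape to seed nTime from the tip's time, so each block the helper mines gets a fresh timestamp and therefore a unique hash.
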
@@ -39,14 +86,11 @@
     def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 6
-        self.rpc_timeout = 900
 
         # Create nodes 0 and 1 to mine.
         # Create node 2 to test pruning.
         self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000",
-                                       "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1",
-                                       "-limitdescendantcount=100", "-limitdescendantsize=5000",
-                                       "-limitancestorcount=100", "-limitancestorsize=5000"]
+                                       "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"]
         # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
         # Create nodes 5 to test wallet in prune mode, but do not connect
         self.extra_args = [self.full_node_default_args,
@@ -83,8 +127,7 @@
         self.nodes[0].generate(150)
 
         # Then mine enough full blocks to create more than 550MiB of data
-        for i in range(645):
-            mine_big_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 645)
 
         sync_blocks(self.nodes[0:5])
 
@@ -98,8 +141,7 @@
             "Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full
         # blocks past the height cutoff will ensure this
-        for i in range(25):
-            mine_big_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 25)
 
         # Wait for blk00000.dat to be pruned
         wait_until(lambda: not os.path.isfile(
@@ -118,24 +160,13 @@
         for j in range(12):
             # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
             # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
-            # Stopping node 0 also clears its mempool, so it doesn't have
-            # node1's transactions to accidentally mine
-            self.stop_node(0)
-            self.start_node(0, extra_args=self.full_node_default_args)
+            disconnect_nodes(self.nodes[0], self.nodes[1])
+            disconnect_nodes(self.nodes[0], self.nodes[2])
 
             # Mine 24 blocks in node 1
-            for i in range(24):
-                if j == 0:
-                    mine_big_block(self.nodes[1], self.utxo_cache_1)
-                else:
-                    # Add node1's wallet transactions back to the mempool, to
-                    # avoid the mined blocks from being too small.
-                    self.nodes[1].resendwallettransactions()
-                    # tx's already in mempool from previous disconnects
-                    self.nodes[1].generate(1)
+            mine_large_blocks(self.nodes[1], 24)
 
             # Reorg back with 25 block chain from node 0
-            for i in range(25):
-                mine_big_block(self.nodes[0], self.utxo_cache_0)
+            mine_large_blocks(self.nodes[0], 25)
 
             # Create connections in the order so both nodes can see the reorg
             # at the same time
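
Note: the reorg loop above now isolates nodes at the P2P layer instead of restarting them, which works because mine_large_blocks() assembles blocks by hand and never depends on mempool contents. The fork-and-reorg pattern, sketched with the same framework helpers (the reconnect step presumably mirrors the connect_nodes calls in the unchanged remainder of the loop):

    disconnect_nodes(self.nodes[0], self.nodes[1])  # cut node 0's P2P links
    disconnect_nodes(self.nodes[0], self.nodes[2])
    mine_large_blocks(self.nodes[1], 24)  # soon-to-be-stale side grows first
    mine_large_blocks(self.nodes[0], 25)  # isolated side mines one block more
    connect_nodes(self.nodes[0], self.nodes[1])  # reconnect: shorter side reorgs
    sync_blocks(self.nodes[0:3])
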
@@ -149,14 +180,7 @@
     def reorg_test(self):
         # Node 1 will mine a 300 block chain starting 287 blocks back from Node
         # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring
-        # 288 blocks of undo data to the reorg_test chain. Reboot node 1 to
-        # clear its mempool (hopefully make the invalidate faster). Lower the
-        # block max size so we don't keep mining all our big mempool
-        # transactions (from disconnected blocks)
-        self.stop_node(1)
-        self.start_node(1, extra_args=[
-            "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5",
-            "-noparkdeepreorg", "-maxreorgdepth=-1"])
+        # 288 blocks of undo data to the reorg_test chain.
 
         height = self.nodes[1].getblockcount()
         self.log.info("Current block height: {}".format(height))
@@ -180,11 +204,9 @@
         self.log.info("New best height: {}".format(
             self.nodes[1].getblockcount()))
 
-        # Reboot node1 to clear those giant tx's from mempool
-        self.stop_node(1)
-        self.start_node(1, extra_args=[
-            "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5",
-            "-noparkdeepreorg", "-maxreorgdepth=-1"])
+        # Disconnect node1 and generate the new chain
+        disconnect_nodes(self.nodes[0], self.nodes[1])
+        disconnect_nodes(self.nodes[1], self.nodes[2])
 
         self.log.info("Generating new longer chain of 300 more blocks")
         self.nodes[1].generate(300)
@@ -199,18 +221,10 @@
         self.log.info("Usage possibly still high because of stale blocks in block files: {}".format(
             calc_usage(self.prunedir)))
 
-        self.log.info("Mine 220 more blocks so we have requisite history")
-
-        # Get node0's wallet transactions back in its mempool, to avoid the
-        # mined blocks from being too small.
-        self.nodes[0].resendwallettransactions()
+        self.log.info(
+            "Mine 220 more large blocks so we have requisite history")
 
-        for i in range(22):
-            # This can be slow, so do this in multiple RPC calls to avoid
-            # RPC timeouts.
-            # node 0 has many large tx's in its mempool from the disconnects
-            self.nodes[0].generate(10)
-            sync_blocks(self.nodes[0:3], timeout=300)
+        mine_large_blocks(self.nodes[0], 220)
 
         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: {}".format(usage))
@@ -381,18 +395,9 @@
         self.log.info("Success")
 
     def run_test(self):
-        self.log.info(
-            "Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
-        self.log.info("Mining a big blockchain of 995 blocks")
-
-        # Determine default relay fee
-        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
-        # Cache for utxos, as the listunspent may take a long time later in the
-        # test
-        self.utxo_cache_0 = []
-        self.utxo_cache_1 = []
+        self.log.info("Warning! This test requires 4GB of disk space")
+        self.log.info("Mining a big blockchain of 995 blocks")
 
         self.create_big_chain()
         # Chain diagram key:
         # * blocks on main chain
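
Note: a back-of-envelope check of the sizes involved; the constants come from the patch, the arithmetic does not. Each OP_NOP encodes as a single byte, so every coinbase scriptPubKey weighs in at roughly 950 KB:

    block_mib = 950000 / (1024. * 1024.)  # ~0.906 MiB of OP_NOPs per block
    print(645 * block_mib)                # ~584 MiB, comfortably over the 550 MiB prune target

This is why 645 such blocks suffice for create_big_chain() to cross the 550 MiB threshold without any mempool traffic.
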