Changeset View
Changeset View
Standalone View
Standalone View
test/functional/feature_pruning.py
#!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||
# Copyright (c) 2014-2019 The Bitcoin Core developers | # Copyright (c) 2014-2019 The Bitcoin Core developers | ||||
# Distributed under the MIT software license, see the accompanying | # Distributed under the MIT software license, see the accompanying | ||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. | # file COPYING or http://www.opensource.org/licenses/mit-license.php. | ||||
"""Test the pruning code. | """Test the pruning code. | ||||
WARNING: | WARNING: | ||||
This test uses 4GB of disk space. | This test uses 4GB of disk space. | ||||
This test takes 30 mins or more (up to 2 hours) | This test takes 30 mins or more (up to 2 hours) | ||||
""" | """ | ||||
import os | import os | ||||
from test_framework.blocktools import mine_big_block | from test_framework.blocktools import create_coinbase | ||||
from test_framework.messages import CBlock, ToHex | |||||
from test_framework.script import CScript, OP_RETURN, OP_NOP | |||||
from test_framework.test_framework import BitcoinTestFramework | from test_framework.test_framework import BitcoinTestFramework | ||||
from test_framework.util import ( | from test_framework.util import ( | ||||
assert_equal, | assert_equal, | ||||
assert_greater_than, | assert_greater_than, | ||||
assert_raises_rpc_error, | assert_raises_rpc_error, | ||||
connect_nodes, | connect_nodes, | ||||
disconnect_nodes, | |||||
sync_blocks, | sync_blocks, | ||||
wait_until, | wait_until, | ||||
) | ) | ||||
MIN_BLOCKS_TO_KEEP = 288 | MIN_BLOCKS_TO_KEEP = 288 | ||||
# Rescans start at the earliest block up to 2 hours before a key timestamp, so | # Rescans start at the earliest block up to 2 hours before a key timestamp, so | ||||
# the manual prune RPC avoids pruning blocks in the same window to be | # the manual prune RPC avoids pruning blocks in the same window to be | ||||
# compatible with pruning based on key creation time. | # compatible with pruning based on key creation time. | ||||
TIMESTAMP_WINDOW = 2 * 60 * 60 | TIMESTAMP_WINDOW = 2 * 60 * 60 | ||||
def mine_large_blocks(node, n):
    """Mine n consensus-valid ~1MB blocks on node and submit them via submitblock.

    Blocks are built by hand (rather than with generate()) so that each block
    is large regardless of mempool contents: the coinbase output carries a
    ~950kB OP_RETURN scriptPubKey.

    Args:
        node: the test node (RPC proxy) to mine on top of and submit to.
        n: number of blocks to mine.
    """
    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
    # transaction but is consensus valid.
    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)

    # Get the block parameters for the first block
    best_block = node.getblock(node.getbestblockhash())
    height = int(best_block["height"]) + 1
    try:
        # Static variable ensures that time is monotonically increasing and is
        # therefore different for each block created => blockhash is unique.
        # Clamp with max(), not min(): the new timestamp must never fall behind
        # the current tip's time, or the submitted block could violate the
        # median-time-past rule and be rejected.
        mine_large_blocks.nTime = max(
            mine_large_blocks.nTime, int(best_block["time"])) + 1
    except AttributeError:
        # First call: initialize the static timestamp from the current tip.
        mine_large_blocks.nTime = int(best_block["time"]) + 1
    previousblockhash = int(best_block["hash"], 16)

    for _ in range(n):
        # Build the coinbase transaction (with large scriptPubKey)
        coinbase_tx = create_coinbase(height)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
        coinbase_tx.vout[0].scriptPubKey = big_script
        coinbase_tx.rehash()

        # Build the block
        block = CBlock()
        block.nVersion = best_block["version"]
        block.hashPrevBlock = previousblockhash
        block.nTime = mine_large_blocks.nTime
        # Minimum-difficulty regtest target.
        block.nBits = int('207fffff', 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        # Submit to the node
        node.submitblock(ToHex(block))

        previousblockhash = block.sha256
        height += 1
        mine_large_blocks.nTime += 1
def calc_usage(blockdir):
    """Return the total size, in MiB, of the regular files in blockdir.

    Subdirectories are ignored. Uses os.path.join consistently for both the
    size lookup and the isfile check, so the result is correct whether or not
    blockdir ends with a path separator (the original concatenated
    `blockdir + f` for getsize, which broke without a trailing separator).
    """
    return sum(
        os.path.getsize(os.path.join(blockdir, f))
        for f in os.listdir(blockdir)
        if os.path.isfile(os.path.join(blockdir, f))
    ) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework): | class PruneTest(BitcoinTestFramework): | ||||
def set_test_params(self): | def set_test_params(self): | ||||
self.setup_clean_chain = True | self.setup_clean_chain = True | ||||
self.num_nodes = 6 | self.num_nodes = 6 | ||||
self.rpc_timeout = 900 | |||||
# Create nodes 0 and 1 to mine. | # Create nodes 0 and 1 to mine. | ||||
# Create node 2 to test pruning. | # Create node 2 to test pruning. | ||||
self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", | self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", | ||||
"-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1", | "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"] | ||||
"-limitdescendantcount=100", "-limitdescendantsize=5000", | |||||
"-limitancestorcount=100", "-limitancestorsize=5000"] | |||||
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) | # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) | ||||
# Create nodes 5 to test wallet in prune mode, but do not connect | # Create nodes 5 to test wallet in prune mode, but do not connect | ||||
self.extra_args = [self.full_node_default_args, | self.extra_args = [self.full_node_default_args, | ||||
self.full_node_default_args, | self.full_node_default_args, | ||||
["-maxreceivebuffer=20000", "-prune=550", | ["-maxreceivebuffer=20000", "-prune=550", | ||||
"-noparkdeepreorg", "-maxreorgdepth=-1"], | "-noparkdeepreorg", "-maxreorgdepth=-1"], | ||||
["-maxreceivebuffer=20000", "-blockmaxsize=999000", | ["-maxreceivebuffer=20000", "-blockmaxsize=999000", | ||||
"-noparkdeepreorg", "-maxreorgdepth=-1"], | "-noparkdeepreorg", "-maxreorgdepth=-1"], | ||||
Show All 20 Lines | class PruneTest(BitcoinTestFramework): | ||||
    def create_big_chain(self):
        """Build the initial chain: 350 spendable coinbases plus 645 large
        blocks, so block-file usage exceeds the 550MiB prune target."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)

        # Then mine enough full blocks to create more than 550MiB of data
        mine_large_blocks(self.nodes[0], 645)

        # Node 5 is intentionally not connected, so sync only nodes 0-4.
        sync_blocks(self.nodes[0:5])
    def test_height_min(self):
        """Verify the pruning node keeps blk00000.dat until past the target,
        then prunes it once more blocks arrive, ending below 550MiB."""
        assert os.path.isfile(os.path.join(
            self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
        self.log.info("Success")
        self.log.info("Though we're already using more than 550MiB, current usage: {}".format(
            calc_usage(self.prunedir)))
        self.log.info(
            "Mining 25 more blocks should cause the first block file to be pruned")

        # Pruning doesn't run until we're allocating another chunk, 20 full
        # blocks past the height cutoff will ensure this
        mine_large_blocks(self.nodes[0], 25)

        # Wait for blk00000.dat to be pruned
        wait_until(lambda: not os.path.isfile(
            os.path.join(self.prunedir, "blk00000.dat")), timeout=30)

        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: {}".format(usage))
        assert_greater_than(550, usage)
    def create_chain_with_staleblocks(self):
        """Repeatedly fork the chain so the pruning node accumulates stale
        large blocks in its block files: 12 rounds of a 24-block stale branch
        from node 1 overridden by a 25-block branch from node 0."""
        # Create stale blocks in manageable sized chunks
        self.log.info(
            "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            disconnect_nodes(self.nodes[0], self.nodes[1])
            disconnect_nodes(self.nodes[0], self.nodes[2])
            # Mine 24 blocks in node 1
            mine_large_blocks(self.nodes[1], 24)

            # Reorg back with 25 block chain from node 0
            mine_large_blocks(self.nodes[0], 25)

            # Create connections in the order so both nodes can see the reorg
            # at the same time
            connect_nodes(self.nodes[0], self.nodes[1])
            connect_nodes(self.nodes[0], self.nodes[2])
            sync_blocks(self.nodes[0:3])

        self.log.info("Usage can be over target because of high stale rate: {}".format(
            calc_usage(self.prunedir)))
    def reorg_test(self):
        """Force node 2 through a deep (288-block) reorg onto a new 300-block
        chain mined by node 1, then extend the chain so pruning can catch up
        and usage returns below the 550MiB target.

        Records self.forkheight / self.forkhash for the later reorg_back
        stage. Assumes self.prunedir is set.
        """
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node
        # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring
        # 288 blocks of undo data to the reorg_test chain.
        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: {}".format(height))

        self.forkheight = height - 287
        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
        self.log.info("Invalidating block {} at height {}".format(
            self.forkhash, self.forkheight))
        self.nodes[1].invalidateblock(self.forkhash)

        # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want.
        # So invalidate that fork as well, until we're on the same chain as
        # node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(self.forkheight - 1)

        assert self.nodes[1].getblockcount() == self.forkheight - 1
        self.log.info("New best height: {}".format(
            self.nodes[1].getblockcount()))

        # Disconnect node1 and generate the new chain
        disconnect_nodes(self.nodes[0], self.nodes[1])
        disconnect_nodes(self.nodes[1], self.nodes[2])

        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        self.log.info("Reconnect nodes")
        connect_nodes(self.nodes[0], self.nodes[1])
        connect_nodes(self.nodes[1], self.nodes[2])
        sync_blocks(self.nodes[0:3], timeout=120)

        self.log.info("Verify height on node 2: {}".format(
            self.nodes[2].getblockcount()))
        self.log.info("Usage possibly still high because of stale blocks in block files: {}".format(
            calc_usage(self.prunedir)))

        self.log.info(
            "Mine 220 more large blocks so we have requisite history")
        mine_large_blocks(self.nodes[0], 220)

        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: {}".format(usage))
        assert_greater_than(550, usage)
def reorg_back(self): | def reorg_back(self): | ||||
# Verify that a block on the old main chain fork has been pruned away | # Verify that a block on the old main chain fork has been pruned away | ||||
assert_raises_rpc_error( | assert_raises_rpc_error( | ||||
▲ Show 20 Lines • Show All 154 Lines • ▼ Show 20 Lines | def wallet_test(self): | ||||
sync_blocks(nds, wait=5, timeout=300) | sync_blocks(nds, wait=5, timeout=300) | ||||
# Stop and start to trigger rescan | # Stop and start to trigger rescan | ||||
self.stop_node(5) | self.stop_node(5) | ||||
self.start_node( | self.start_node( | ||||
5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) | 5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) | ||||
self.log.info("Success") | self.log.info("Success") | ||||
def run_test(self): | def run_test(self): | ||||
self.log.info( | self.log.info("Warning! This test requires 4GB of disk space") | ||||
"Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") | |||||
self.log.info("Mining a big blockchain of 995 blocks") | |||||
# Determine default relay fee | |||||
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] | |||||
# Cache for utxos, as the listunspent may take a long time later in the | |||||
# test | |||||
self.utxo_cache_0 = [] | |||||
self.utxo_cache_1 = [] | |||||
self.log.info("Mining a big blockchain of 995 blocks") | |||||
self.create_big_chain() | self.create_big_chain() | ||||
# Chain diagram key: | # Chain diagram key: | ||||
# * blocks on main chain | # * blocks on main chain | ||||
# +,&,$,@ blocks on other forks | # +,&,$,@ blocks on other forks | ||||
# X invalidated block | # X invalidated block | ||||
# N1 Node 1 | # N1 Node 1 | ||||
# | # | ||||
# Start by mining a simple chain that all nodes have | # Start by mining a simple chain that all nodes have | ||||
▲ Show 20 Lines • Show All 105 Lines • Show Last 20 Lines |