diff --git a/test/functional/feature_loadblock.py b/test/functional/feature_loadblock.py index 162f9cb03..4c99bc0bf 100755 --- a/test/functional/feature_loadblock.py +++ b/test/functional/feature_loadblock.py @@ -1,87 +1,86 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test loadblock option Test the option to start a node with the option loadblock which loads a serialized blockchain from a file (usually called bootstrap.dat). To generate that file this test uses the helper scripts available in contrib/linearize. """ import os import subprocess import sys import tempfile import urllib from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class LoadblockTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.supports_cli = False def run_test(self): self.nodes[1].setnetworkactive(state=False) self.nodes[0].generate(100) # Parsing the url of our node to get settings for config file data_dir = self.nodes[0].datadir node_url = urllib.parse.urlparse(self.nodes[0].url) cfg_file = os.path.join(data_dir, "linearize.cfg") bootstrap_file = os.path.join(self.options.tmpdir, "bootstrap.dat") genesis_block = self.nodes[0].getblockhash(0) blocks_dir = os.path.join(data_dir, self.chain, "blocks") hash_list = tempfile.NamedTemporaryFile(dir=data_dir, mode='w', delete=False, encoding="utf-8") self.log.info("Create linearization config file") with open(cfg_file, "a", encoding="utf-8") as cfg: cfg.write("datadir={}\n".format(data_dir)) cfg.write("rpcuser={}\n".format(node_url.username)) cfg.write("rpcpassword={}\n".format(node_url.password)) cfg.write("port={}\n".format(node_url.port)) cfg.write("host={}\n".format(node_url.hostname)) cfg.write("output_file={}\n".format(bootstrap_file)) cfg.write("max_height=100\n") cfg.write("netmagic=fabfb5da\n") cfg.write("input={}\n".format(blocks_dir)) cfg.write("genesis={}\n".format(genesis_block)) cfg.write("hashlist={}\n".format(hash_list.name)) base_dir = self.config["environment"]["SRCDIR"] linearize_dir = os.path.join(base_dir, "contrib", "linearize") self.log.info("Run linearization of block hashes") linearize_hashes_file = os.path.join( linearize_dir, "linearize-hashes.py") subprocess.run([sys.executable, linearize_hashes_file, cfg_file], stdout=hash_list, check=True) self.log.info("Run linearization of block data") linearize_data_file = os.path.join(linearize_dir, "linearize-data.py") subprocess.run([sys.executable, linearize_data_file, cfg_file], check=True) self.log.info("Restart second, unsynced node with bootstrap file") - self.stop_node(1) - self.start_node(1, ["-loadblock=" + bootstrap_file]) + self.restart_node(1, extra_args=["-loadblock=" + bootstrap_file]) # start_node is blocking on all block files being imported assert_equal(self.nodes[1].getblockcount(), 100) assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100) assert_equal( self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) if __name__ == '__main__': LoadblockTest().main() diff --git a/test/functional/feature_logging.py b/test/functional/feature_logging.py index 674035df6..86b86ed3b 100755 --- a/test/functional/feature_logging.py +++ b/test/functional/feature_logging.py @@ -1,77 +1,76 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software 
license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test debug logging.""" import os from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch class LoggingTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def relative_log_path(self, name): return os.path.join(self.nodes[0].datadir, self.chain, name) def run_test(self): # test default log file name default_log_path = self.relative_log_path("debug.log") assert os.path.isfile(default_log_path) # test alternative log file name in datadir self.restart_node(0, ["-debuglogfile=foo.log"]) assert os.path.isfile(self.relative_log_path("foo.log")) # test alternative log file name outside datadir tempname = os.path.join(self.options.tmpdir, "foo.log") self.restart_node(0, ["-debuglogfile={}".format(tempname)]) assert os.path.isfile(tempname) # check that invalid log (relative) will cause error invdir = self.relative_log_path("foo") invalidname = os.path.join("foo", "foo.log") self.stop_node(0) exp_stderr = r"Error: Could not open debug log file \S+$" self.nodes[0].assert_start_raises_init_error( ["-debuglogfile={}".format(invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX) assert not os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (relative) works after path exists self.stop_node(0) os.mkdir(invdir) self.start_node(0, ["-debuglogfile={}".format(invalidname)]) assert os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (absolute) will cause error self.stop_node(0) invdir = os.path.join(self.options.tmpdir, "foo") invalidname = os.path.join(invdir, "foo.log") self.nodes[0].assert_start_raises_init_error( ["-debuglogfile={}".format(invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX) assert not os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (absolute) works after path exists self.stop_node(0) os.mkdir(invdir) self.start_node(0, ["-debuglogfile={}".format(invalidname)]) assert os.path.isfile(os.path.join(invdir, "foo.log")) # check that -nodebuglogfile disables logging self.stop_node(0) os.unlink(default_log_path) assert not os.path.isfile(default_log_path) self.start_node(0, ["-nodebuglogfile"]) assert not os.path.isfile(default_log_path) # just sanity check no crash here - self.stop_node(0) - self.start_node(0, ["-debuglogfile={}".format(os.devnull)]) + self.restart_node(0, ["-debuglogfile={}".format(os.devnull)]) if __name__ == '__main__': LoggingTest().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index c42154343..cfb0ab62c 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -1,524 +1,519 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the pruning code. WARNING: This test uses 4GB of disk space. 
This test takes 30 mins or more (up to 2 hours) """ import os from test_framework.blocktools import create_coinbase from test_framework.messages import CBlock, ToHex from test_framework.script import CScript, OP_RETURN, OP_NOP from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, wait_until, ) # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. TIMESTAMP_WINDOW = 2 * 60 * 60 def mine_large_blocks(node, n): # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase # transaction but is consensus valid. # Set the nTime if this is the first time this function has been called. # A static variable ensures that time is monotonically increasing and is therefore # different for each block created => blockhash is unique. if "nTime" not in mine_large_blocks.__dict__: mine_large_blocks.nTime = 0 # Get the block parameters for the first block big_script = CScript([OP_RETURN] + [OP_NOP] * 950000) best_block = node.getblock(node.getbestblockhash()) height = int(best_block["height"]) + 1 mine_large_blocks.nTime = max( mine_large_blocks.nTime, int(best_block["time"])) + 1 previousblockhash = int(best_block["hash"], 16) for _ in range(n): # Build the coinbase transaction (with large scriptPubKey) coinbase_tx = create_coinbase(height) coinbase_tx.vin[0].nSequence = 2 ** 32 - 1 coinbase_tx.vout[0].scriptPubKey = big_script coinbase_tx.rehash() # Build the block block = CBlock() block.nVersion = best_block["version"] block.hashPrevBlock = previousblockhash block.nTime = mine_large_blocks.nTime block.nBits = int('207fffff', 16) block.nNonce = 0 block.vtx = [coinbase_tx] block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Submit to the node node.submitblock(ToHex(block)) previousblockhash = block.sha256 height += 1 mine_large_blocks.nTime += 1 def calc_usage(blockdir): return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 self.supports_cli = False # Create nodes 0 and 1 to mine. # Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-prune=550"]] self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() self.prunedir = os.path.join( self.nodes[2].datadir, self.chain, 'blocks', '') connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[0], self.nodes[4]) self.sync_blocks(self.nodes[0:5]) def setup_nodes(self): self.add_nodes(self.num_nodes, self.extra_args) self.start_nodes() for n in self.nodes: n.importprivkey( privkey=n.get_deterministic_priv_key().key, label='coinbase', rescan=False) def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) self.sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data mine_large_blocks(self.nodes[0], 645) self.sync_blocks(self.nodes[0:5]) def test_height_min(self): assert os.path.isfile(os.path.join( self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early" self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: {}".format( calc_usage(self.prunedir))) self.log.info( "Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full # blocks past the height cutoff will ensure this mine_large_blocks(self.nodes[0], 25) # Wait for blk00000.dat to be pruned wait_until(lambda: not os.path.isfile( os.path.join(self.prunedir, "blk00000.dat")), timeout=30) self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) assert_greater_than(550, usage) def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks self.log.info( "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and # then reorg's when node0 reconnects disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[0], self.nodes[2]) # Mine 24 blocks in node 1 mine_large_blocks(self.nodes[1], 24) # Reorg back with 25 block chain from node 0 mine_large_blocks(self.nodes[0], 25) # Create connections in the order so both nodes can see the reorg # at the same time connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[0], self.nodes[2]) self.sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: {}".format( calc_usage(self.prunedir))) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node # 0 
and Node 2's tip. This will cause Node 2 to do a reorg requiring # 288 blocks of undo data to the reorg_test chain. height = self.nodes[1].getblockcount() self.log.info("Current block height: {}".format(height)) self.forkheight = height - 287 self.forkhash = self.nodes[1].getblockhash(self.forkheight) self.log.info("Invalidating block {} at height {}".format( self.forkhash, self.forkheight)) self.nodes[1].invalidateblock(self.forkhash) # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want. # So invalidate that fork as well, until we're on the same chain as # node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1) curhash = self.nodes[1].getblockhash(self.forkheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(self.forkheight - 1) assert self.nodes[1].getblockcount() == self.forkheight - 1 self.log.info("New best height: {}".format( self.nodes[1].getblockcount())) # Disconnect node1 and generate the new chain disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[1], self.nodes[2]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) self.sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: {}".format( self.nodes[2].getblockcount())) self.log.info("Usage possibly still high because of stale blocks in block files: {}".format( calc_usage(self.prunedir))) self.log.info( "Mine 220 more large blocks so we have requisite history") mine_large_blocks(self.nodes[0], 220) self.sync_blocks(self.nodes[0:3], timeout=120) usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) assert_greater_than(550, usage) def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error( -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']): self.nodes[2].verifychain(checklevel=4, nblocks=0) self.log.info( "Will need to redownload block {}".format(self.forkheight)) # Verify that we have enough history to reorg back to the fork point. # Although this is more than 288 blocks, because this chain was written # more recently and only its other 299 small and 220 large blocks are in # the block files after it, it is expected to still be retained. self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to # the original chain created in create_chain_with_stale_blocks because # it doesn't know of any peer that's on that chain from which to # redownload its missing blocks. Invalidate the reorg_test chain in # node 0 as well, it can successfully switch to the original chain # because it has all the block data. However it must mine enough blocks # to have a more work chain than the reorg_test chain in order to # trigger node 2's block download logic. 
At this point node 2 is within # 288 blocks of the fork point so it will preserve its ability to # reorg. if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight self.log.info( "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {}".format( blocks_to_mine)) self.nodes[0].invalidateblock(curchainhash) assert_equal(self.nodes[0].getblockcount(), self.mainchainheight) assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info( "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") # Wait for Node 2 to reorg to proper height wait_until(lambda: self.nodes[2].getblockcount( ) >= goalbestheight, timeout=900) assert_equal(self.nodes[2].getbestblockhash(), goalbesthash) # Verify we can now have the data for a block previously pruned assert_equal(self.nodes[2].getblock( self.forkhash)["height"], self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode self.start_node(node_number) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) # now re-start in manual pruning mode - self.stop_node(node_number) - self.start_node(node_number, extra_args=["-prune=1"]) + self.restart_node(node_number, extra_args=["-prune=1"]) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))[ "time"] + TIMESTAMP_WINDOW else: return index def prune(index): ret = node.pruneblockchain(height=height(index)) assert_equal(ret, node.getblockchaininfo()['pruneheight']) def has_block(index): return os.path.isfile(os.path.join( self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index))) # should not prune because chain tip of node 3 (995) < PruneAfterHeight # (1000) assert_raises_rpc_error( -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # Save block transaction count before pruning, assert value block1_details = node.getblock(node.getblockhash(1)) assert_equal(block1_details["nTx"], len(block1_details["tx"])) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # Pruned block should still know the number of transactions assert_equal(node.getblockheader(node.getblockhash(1)) ["nTx"], block1_details["nTx"]) # negative heights should raise an exception assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) assert has_block( 0), "blk00000.dat is missing when should still be there" # Does nothing node.pruneblockchain(height(0)) assert has_block( 0), "blk00000.dat is missing when should still be there" # height=500 should prune first file prune(500) assert not has_block( 0), "blk00000.dat is still there, should be pruned by now" assert has_block( 1), "blk00001.dat is missing when should still be there" # height=650 should prune second file prune(650) assert not has_block( 1), "blk00001.dat is still there, should be pruned by now" # height=1000 should not prune anything more, because tip-288 is in # blk00002.dat. 
prune(1000) assert has_block( 2), "blk00002.dat is missing when should still be there" # advance the tip so blk00002.dat and blk00003.dat can be pruned (the # last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) assert not has_block( 2), "blk00002.dat is still there, should be pruned by now" assert not has_block( 3), "blk00003.dat is still there, should be pruned by now" # stop node, start back up with auto-prune at 550 MiB, make sure still # runs - self.stop_node(node_number) - self.start_node(node_number, extra_args=["-prune=550"]) + self.restart_node(node_number, extra_args=["-prune=550"]) self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape self.log.info("Stop and start pruning node to trigger wallet rescan") - self.stop_node(2) - self.start_node( + self.restart_node( 2, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], self.nodes[5]) nds = [self.nodes[0], self.nodes[5]] self.sync_blocks(nds, wait=5, timeout=300) - # Stop and start to trigger rescan - self.stop_node(5) - self.start_node( + self.restart_node( 5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") def run_test(self): self.log.info("Warning! This test requires 4GB of disk space") self.log.info("Mining a big blockchain of 995 blocks") self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning node with 995 blocks self.stop_node(3) self.stop_node(4) self.log.info( "Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) self.log.info( "Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() # 1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.log.info("Check that we can still survive a 288 block reorg") self.reorg_test() # (1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. ..
# # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info( "Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. # # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main() diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py index e16d6aa74..d0b5f8791 100755 --- a/test/functional/p2p_disconnect_ban.py +++ b/test/functional/p2p_disconnect_ban.py @@ -1,137 +1,136 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node disconnect and ban behavior""" import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, wait_until, ) class DisconnectBanTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.supports_cli = False def run_test(self): self.log.info("Connect nodes both way") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[0]) self.log.info("Test setban and listbanned RPCs") self.log.info("setban: successfully ban single IP address") # node1 should have 2 connections to node0 at this point assert_equal(len(self.nodes[1].getpeerinfo()), 2) self.nodes[1].setban(subnet="127.0.0.1", command="add") wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10) # all nodes must be disconnected at this point assert_equal(len(self.nodes[1].getpeerinfo()), 0) assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("clearbanned: successfully clear ban list") self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].setban("127.0.0.0/24", "add") self.log.info("setban: fail to ban an already banned subnet") assert_equal(len(self.nodes[1].listbanned()), 1) assert_raises_rpc_error( -23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add") self.log.info("setban: fail to ban an invalid subnet") assert_raises_rpc_error( -30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add") # still only one banned ip because 127.0.0.1 is within the range of # 127.0.0.0/24 assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: fail to unban a non-banned subnet") assert_raises_rpc_error( -30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove") assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: 
successfully unban subnet") self.nodes[1].setban("127.0.0.0/24", "remove") assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.log.info("setban: test persistence across node restart") self.nodes[1].setban("127.0.0.0/32", "add") self.nodes[1].setban("127.0.0.0/24", "add") # Set the mocktime so we can control when bans expire old_time = int(time.time()) self.nodes[1].setmocktime(old_time) # ban for 1 seconds self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1000 seconds self.nodes[1].setban( "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) listBeforeShutdown = self.nodes[1].listbanned() assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) # Move time forward by 3 seconds so the third ban has expired self.nodes[1].setmocktime(old_time + 3) assert_equal(len(self.nodes[1].listbanned()), 3) - self.stop_node(1) - self.start_node(1) + self.restart_node(1) listAfterShutdown = self.nodes[1].listbanned() assert_equal("127.0.0.0/24", listAfterShutdown[0]['address']) assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) assert_equal("/19" in listAfterShutdown[2]['address'], True) # Clear ban lists self.nodes[1].clearbanned() self.log.info("Connect nodes both way") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[0]) self.log.info("Test disconnectnode RPCs") self.log.info( "disconnectnode: fail to disconnect when calling with address and nodeid") address1 = self.nodes[0].getpeerinfo()[0]['addr'] node1 = self.nodes[0].getpeerinfo()[0]['addr'] assert_raises_rpc_error( -32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1) self.log.info( "disconnectnode: fail to disconnect when calling with junk address") assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street") self.log.info( "disconnectnode: successfully disconnect node by address") address1 = self.nodes[0].getpeerinfo()[0]['addr'] self.nodes[0].disconnectnode(address=address1) wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] self.log.info("disconnectnode: successfully reconnect node") # reconnect the node connect_nodes(self.nodes[0], self.nodes[1]) assert_equal(len(self.nodes[0].getpeerinfo()), 2) assert [node for node in self.nodes[0] .getpeerinfo() if node['addr'] == address1] self.log.info( "disconnectnode: successfully disconnect node by node id") id1 = self.nodes[0].getpeerinfo()[0]['id'] self.nodes[0].disconnectnode(nodeid=id1) wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1] if __name__ == '__main__': DisconnectBanTest().main() diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py index 7f6f94017..f1805691a 100755 --- a/test/functional/wallet_abandonconflict.py +++ b/test/functional/wallet_abandonconflict.py @@ -1,228 +1,225 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the abandontransaction RPC. The abandontransaction RPC marks a transaction and all its in-wallet descendants as abandoned which allows their inputs to be respent. 
It can be used to replace "stuck" or evicted transactions. It only works on transactions which are not included in a block and are not currently in the mempool. It has no effect on transactions which are already abandoned. """ from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, disconnect_nodes, satoshi_round, ) class AbandonConflictTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [["-minrelaytxfee=0.00001"], []] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): def total_fees(*txids): total = 0 for txid in txids: # '-=' is because gettransaction(txid)['fee'] returns a negative total -= self.nodes[0].gettransaction(txid)['fee'] return satoshi_round(total) self.nodes[1].generate(100) self.sync_blocks() balance = self.nodes[0].getbalance() txA = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) txB = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) txC = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) self.sync_mempools() self.nodes[1].generate(1) # Can not abandon non-wallet transaction assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32)) # Can not abandon confirmed transaction assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA)) self.sync_blocks() newbalance = self.nodes[0].getbalance() # no more than fees lost assert balance - newbalance <= total_fees(txA, txB, txC) balance = newbalance # Disconnect nodes so node0's transactions don't get into node1's # mempool disconnect_nodes(self.nodes[0], self.nodes[1]) # Identify the 10btc outputs nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txA)["details"] if tx_out["amount"] == Decimal("10")) nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txB)["details"] if tx_out["amount"] == Decimal("10")) nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txC)["details"] if tx_out["amount"] == Decimal("10")) inputs = [] # spend 10btc outputs from txA and txB inputs.append({"txid": txA, "vout": nA}) inputs.append({"txid": txB, "vout": nB}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") outputs[self.nodes[1].getnewaddress()] = Decimal("5") signed = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs)) txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) # Identify the 14.99998btc output nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txAB1)["details"] if tx_out["amount"] == Decimal("14.99998")) # Create a child tx spending AB1 and C inputs = [] # Amount 14.99998 BCH inputs.append({"txid": txAB1, "vout": nAB}) # Amount 10 BCH inputs.append({"txid": txC, "vout": nC}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") signed2 = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs)) txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) # Create a child tx spending ABC2 signed3_change = Decimal("24.999") inputs = [{"txid": txABC2, "vout": 0}] outputs = {self.nodes[0].getnewaddress(): signed3_change} signed3 = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs)) # note tx is never 
directly referenced, only abandoned as a child of # the above self.nodes[0].sendrawtransaction(signed3["hex"]) # In mempool txs from self should increase balance from change newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("30") + signed3_change) balance = newbalance # Restart the node with a higher min relay fee so the parent tx is no longer in mempool # TODO: redo with eviction - self.stop_node(0) - self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) + self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"]) assert self.nodes[0].getmempoolinfo()['loaded'] # Verify txs no longer in either node's mempool assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) # Transactions which are not in the mempool should only reduce wallet balance. # Transaction inputs should still be spent, but the change not yet # received. newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - signed3_change) # Unconfirmed received funds that are not in mempool also shouldn't show # up in unconfirmed balance. Note that the transactions stored in the wallet # are not necessarily in the node's mempool. balances = self.nodes[0].getbalances()['mine'] assert_equal( balances['untrusted_pending'] + balances['trusted'], newbalance) # Unconfirmed transactions which are not in the mempool should also # not be in listunspent assert txABC2 not in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)] balance = newbalance # Abandon original transaction and verify inputs are available again # including that the child tx was also abandoned self.nodes[0].abandontransaction(txAB1) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance + Decimal("30")) balance = newbalance # Verify that even with a low min relay fee, the tx is not re-accepted # from wallet on startup once abandoned. - self.stop_node(0) - self.start_node(0, extra_args=["-minrelaytxfee=0.00001"]) + self.restart_node(0, extra_args=["-minrelaytxfee=0.00001"]) assert self.nodes[0].getmempoolinfo()['loaded'] assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(self.nodes[0].getbalance(), balance) # If the transaction is re-sent the wallet also unabandons it. The # change should be available, and it's child transaction should remain # abandoned. # NOTE: Abandoned transactions are internal to the wallet, and tracked # separately from other indices. self.nodes[0].sendrawtransaction(signed["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998")) balance = newbalance # Send child tx again so it is no longer abandoned. self.nodes[0].sendrawtransaction(signed2["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) balance = newbalance # Reset to a higher relay fee so that we abandon a transaction - self.stop_node(0) - self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) + self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"]) assert self.nodes[0].getmempoolinfo()['loaded'] assert_equal(len(self.nodes[0].getrawmempool()), 0) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("24.9996")) balance = newbalance # Create a double spend of AB1. Spend it again from only A's 10 output. # Mine double spend from node 1. 
inputs = [] inputs.append({"txid": txA, "vout": nA}) outputs = {} outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999") tx = self.nodes[0].createrawtransaction(inputs, outputs) signed = self.nodes[0].signrawtransactionwithwallet(tx) self.nodes[1].sendrawtransaction(signed["hex"]) self.nodes[1].generate(1) connect_nodes(self.nodes[0], self.nodes[1]) self.sync_blocks() # Verify that B and C's 10 BCH outputs are available for spending again # because AB1 is now conflicted newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance + Decimal("20")) balance = newbalance # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 # Invalidate the block with the double spend and B's 10 BCH output should no longer be available # Don't think C's should either self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) newbalance = self.nodes[0].getbalance() # assert_equal(newbalance, balance - Decimal("10")) self.log.info( "If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") self.log.info( "conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315") self.log.info(str(balance) + " -> " + str(newbalance) + " ?") if __name__ == '__main__': AbandonConflictTest().main() diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py index 6c884d8e6..c999e6e82 100755 --- a/test/functional/wallet_avoidreuse.py +++ b/test/functional/wallet_avoidreuse.py @@ -1,452 +1,451 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the avoid_reuse and setwalletflag features.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_approx, assert_equal, assert_raises_rpc_error, connect_nodes, ) def reset_balance(node, discardaddr): '''Throw away all owned coins by the node so it gets a balance of 0.''' balance = node.getbalance(avoid_reuse=False) if balance > 0.5: node.sendtoaddress( address=discardaddr, amount=balance, subtractfeefromamount=True, avoid_reuse=False) def count_unspent(node): '''Count the unspent outputs for the given node and return various statistics''' r = { "total": { "count": 0, "sum": 0, }, "reused": { "count": 0, "sum": 0, }, } supports_reused = True for utxo in node.listunspent(minconf=0): r["total"]["count"] += 1 r["total"]["sum"] += utxo["amount"] if supports_reused and "reused" in utxo: if utxo["reused"]: r["reused"]["count"] += 1 r["reused"]["sum"] += utxo["amount"] else: supports_reused = False r["reused"]["supported"] = supports_reused return r def assert_unspent(node, total_count=None, total_sum=None, reused_supported=None, reused_count=None, reused_sum=None): '''Make assertions about a node's unspent output statistics''' stats = count_unspent(node) if total_count is not None: assert_equal(stats["total"]["count"], total_count) if total_sum is not None: assert_approx(stats["total"]["sum"], total_sum, 0.001) if reused_supported is not None: assert_equal(stats["reused"]["supported"], reused_supported) if reused_count is not None: assert_equal(stats["reused"]["count"], reused_count) if reused_sum is not None: assert_approx(stats["reused"]["sum"], reused_sum, 0.001) def assert_balances(node, mine): '''Make assertions about a node's getbalances output''' got = node.getbalances()["mine"] for k, v in mine.items(): 
assert_approx(got[k], v, 0.001) class AvoidReuseTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = False self.num_nodes = 2 # This test isn't testing txn relay/timing, so set whitelist on the # peers for instant txn relay. This speeds up the test run time 2-3x. self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): '''Set up initial chain and run tests defined below''' self.test_persistence() self.test_immutable() self.nodes[0].generate(110) self.sync_all() self.test_change_remains_change(self.nodes[1]) reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) self.test_sending_from_reused_address_without_avoid_reuse() reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) self.test_sending_from_reused_address_fails() reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) self.test_getbalances_used() reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) self.test_full_destination_group_is_preferred() reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) self.test_all_destination_groups_are_used() def test_persistence(self): '''Test that wallet files persist the avoid_reuse flag.''' self.log.info("Test wallet files persist avoid_reuse flag") # Configure node 1 to use avoid_reuse self.nodes[1].setwalletflag('avoid_reuse') # Flags should be node0.avoid_reuse=false, node1.avoid_reuse=true assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False) assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True) # Stop and restart node 1 - self.stop_node(1) - self.start_node(1) + self.restart_node(1) connect_nodes(self.nodes[0], self.nodes[1]) # Flags should still be node0.avoid_reuse=false, node1.avoid_reuse=true assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False) assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True) # Attempting to set flag to its current state should throw assert_raises_rpc_error(-8, "Wallet flag is already set to false", self.nodes[0].setwalletflag, 'avoid_reuse', False) assert_raises_rpc_error(-8, "Wallet flag is already set to true", self.nodes[1].setwalletflag, 'avoid_reuse', True) def test_immutable(self): '''Test immutable wallet flags''' self.log.info("Test immutable wallet flags") # Attempt to set the disable_private_keys flag; this should not work assert_raises_rpc_error(-8, "Wallet flag is immutable", self.nodes[1].setwalletflag, 'disable_private_keys') tempwallet = ".wallet_avoidreuse.py_test_immutable_wallet.dat" # Create a wallet with disable_private_keys set; this should work self.nodes[1].createwallet(tempwallet, True) w = self.nodes[1].get_wallet_rpc(tempwallet) # Attempt to unset the disable_private_keys flag; this should not work assert_raises_rpc_error(-8, "Wallet flag is immutable", w.setwalletflag, 'disable_private_keys', False) # Unload temp wallet self.nodes[1].unloadwallet(tempwallet) def test_change_remains_change(self, node): self.log.info( "Test that change doesn't turn into non-change when spent") reset_balance(node, node.getnewaddress()) addr = node.getnewaddress() txid = node.sendtoaddress(addr, 1) out = node.listunspent(minconf=0, query_options={'minimumAmount': 2}) assert_equal(len(out), 1) assert_equal(out[0]['txid'], txid) changeaddr = out[0]['address'] # Make sure it's starting out as change as expected assert node.getaddressinfo(changeaddr)['ischange'] for logical_tx in node.listtransactions(): assert logical_tx.get('address') != changeaddr # Spend it
reset_balance(node, node.getnewaddress()) # It should still be change assert node.getaddressinfo(changeaddr)['ischange'] for logical_tx in node.listtransactions(): assert logical_tx.get('address') != changeaddr def test_sending_from_reused_address_without_avoid_reuse(self): ''' Test the same as test_sending_from_reused_address_fails, except send the 10 BCH with the avoid_reuse flag set to false. This means the 10 BCH send should succeed, where it fails in test_sending_from_reused_address_fails. ''' self.log.info( "Test sending from reused address with avoid_reuse=false") fundaddr = self.nodes[1].getnewaddress() retaddr = self.nodes[0].getnewaddress() self.nodes[0].sendtoaddress(fundaddr, 10) self.nodes[0].generate(1) self.sync_all() # listunspent should show 1 single, unused 10 BCH output assert_unspent( self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0) # getbalances should show no used, 10 BCH trusted assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10}) # node 0 should not show a used entry, as it does not enable # avoid_reuse assert("used" not in self.nodes[0].getbalances()["mine"]) self.nodes[1].sendtoaddress(retaddr, 5) self.nodes[0].generate(1) self.sync_all() # listunspent should show 1 single, unused 5 BCH output assert_unspent( self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0) # getbalances should show no used, 5 BCH trusted assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5}) self.nodes[0].sendtoaddress(fundaddr, 10) self.nodes[0].generate(1) self.sync_all() # listunspent should show 2 total outputs (5, 10 BCH), one unused (5), # one reused (10) assert_unspent( self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10) # getbalances should show 10 used, 5 BCH trusted assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5}) self.nodes[1].sendtoaddress( address=retaddr, amount=10, avoid_reuse=False) # listunspent should show 1 total output (5 BCH), unused assert_unspent( self.nodes[1], total_count=1, total_sum=5, reused_count=0) # getbalances should show no used, 5 BCH trusted assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5}) # node 1 should now have about 5 BCH left (for both cases) assert_approx(self.nodes[1].getbalance(), 5, 0.001) assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 5, 0.001) def test_sending_from_reused_address_fails(self): ''' Test the simple case where [1] generates a new address A, then [0] sends 10 BCH to A. [1] spends 5 BCH from A. (leaving roughly 5 BCH usable) [0] sends 10 BCH to A again. [1] tries to spend 10 BCH (fails; dirty).
[1] tries to spend 4 BCH (succeeds; change address sufficient) ''' self.log.info("Test sending from reused address fails") fundaddr = self.nodes[1].getnewaddress(label="", address_type="legacy") retaddr = self.nodes[0].getnewaddress() self.nodes[0].sendtoaddress(fundaddr, 10) self.nodes[0].generate(1) self.sync_all() # listunspent should show 1 single, unused 10 BCH output assert_unspent( self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0) # getbalances should show no used, 10 BCH trusted assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10}) self.nodes[1].sendtoaddress(retaddr, 5) self.nodes[0].generate(1) self.sync_all() # listunspent should show 1 single, unused 5 BCH output assert_unspent( self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0) # getbalances should show no used, 5 BCH trusted assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5}) # For the second send, we transmute it to a related single-key address # to make sure it's also detected as re-use # NB: this is not very useful for ABC, but we keep the new variable # name for consistency. new_fundaddr = fundaddr self.nodes[0].sendtoaddress(new_fundaddr, 10) self.nodes[0].generate(1) self.sync_all() # listunspent should show 2 total outputs (5, 10 BCH), one unused (5), # one reused (10) assert_unspent( self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10) # getbalances should show 10 used, 5 BCH trusted assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5}) # node 1 should now have a balance of 5 (no dirty) or 15 (including # dirty) assert_approx(self.nodes[1].getbalance(), 5, 0.001) assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 15, 0.001) assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[1].sendtoaddress, retaddr, 10) self.nodes[1].sendtoaddress(retaddr, 4) # listunspent should show 2 total outputs (1, 10 BCH), one unused (1), # one reused (10) assert_unspent( self.nodes[1], total_count=2, total_sum=11, reused_count=1, reused_sum=10) # getbalances should show 10 used, 1 BCH trusted assert_balances(self.nodes[1], mine={"used": 10, "trusted": 1}) # node 1 should now have about 1 BCH left (no dirty) and 11 (including # dirty) assert_approx(self.nodes[1].getbalance(), 1, 0.001) assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 11, 0.001) def test_getbalances_used(self): ''' getbalances and listunspent should pick up on reused addresses immediately, even for address reusing outputs created before the first transaction was spending from that address ''' self.log.info("Test getbalances used category") # node under test should be completely empty assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0) new_addr = self.nodes[1].getnewaddress() ret_addr = self.nodes[0].getnewaddress() # send multiple transactions, reusing one address for _ in range(11): self.nodes[0].sendtoaddress(new_addr, 1) self.nodes[0].generate(1) self.sync_all() # send transaction that should not use all the available outputs # per the current coin selection algorithm self.nodes[1].sendtoaddress(ret_addr, 5) # getbalances and listunspent should show the remaining outputs # in the reused address as used/reused assert_unspent( self.nodes[1], total_count=2, total_sum=6, reused_count=1, reused_sum=1) assert_balances(self.nodes[1], mine={"used": 1, "trusted": 5}) def test_full_destination_group_is_preferred(self): ''' Test the case where [1] only has 11 outputs of 1 BCH in the same reused address and tries to send a small 
payment of 0.5 BCH. The wallet should use 10 outputs from the reused address as inputs and not a single 1 BCH input, in order to join several outputs from the reused address. ''' self.log.info( "Test that full destination groups are preferred in coin selection") # Node under test should be empty assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0) new_addr = self.nodes[1].getnewaddress() ret_addr = self.nodes[0].getnewaddress() # Send 11 outputs of 1 BCH to the same, reused address in the wallet for _ in range(11): self.nodes[0].sendtoaddress(new_addr, 1) self.nodes[0].generate(1) self.sync_all() # Sending a transaction that is smaller than each one of the # available outputs txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=0.5) inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"] # The transaction should use 10 inputs exactly assert_equal(len(inputs), 10) def test_all_destination_groups_are_used(self): ''' Test the case where [1] only has 22 outputs of 1 BCH in the same reused address and tries to send a payment of 20.5 BCH. The wallet should use all 22 outputs from the reused address as inputs. ''' self.log.info("Test that all destination groups are used") # Node under test should be empty assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0) new_addr = self.nodes[1].getnewaddress() ret_addr = self.nodes[0].getnewaddress() # Send 22 outputs of 1 BCH to the same, reused address in the wallet for _ in range(22): self.nodes[0].sendtoaddress(new_addr, 1) self.nodes[0].generate(1) self.sync_all() # Sending a transaction that needs to use the full groups # of 10 inputs but also the incomplete group of 2 inputs. txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=20.5) inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"] # The transaction should use 22 inputs exactly assert_equal(len(inputs), 22) if __name__ == '__main__': AvoidReuseTest().main() diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py index 4290c6ae7..064cfb497 100755 --- a/test/functional/wallet_dump.py +++ b/test/functional/wallet_dump.py @@ -1,222 +1,221 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the dumpwallet RPC.""" import datetime import os import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, ) def read_dump(file_name, addrs, script_addrs, hd_master_addr_old): """ Read the given dump, count the addrs that match, count change and reserve. 
Also check that the old hd_master is inactive """ with open(file_name, encoding='utf8') as inputfile: found_addr = 0 found_comments = [] found_script_addr = 0 found_addr_chg = 0 found_addr_rsv = 0 hd_master_addr_ret = None for line in inputfile: line = line.strip() if not line: continue if line[0] == '#': found_comments.append(line) else: # split out some data key_date_label, comment = line.split("#") key_date_label = key_date_label.split(" ") # key = key_date_label[0] date = key_date_label[1] keytype = key_date_label[2] imported_key = date == '1970-01-01T00:00:01Z' if imported_key: # Imported keys have multiple addresses, no label (keypath) and timestamp # Skip them continue addr_keypath = comment.split(" addr=")[1] addr = addr_keypath.split(" ")[0] keypath = None if keytype == "inactivehdseed=1": # ensure the old master is still available assert hd_master_addr_old == addr elif keytype == "hdseed=1": # ensure we have generated a new hd master key assert hd_master_addr_old != addr hd_master_addr_ret = addr elif keytype == "script=1": # scripts don't have keypaths keypath = None else: keypath = addr_keypath.rstrip().split("hdkeypath=")[1] # count key types for addrObj in addrs: if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=": found_addr += 1 break elif keytype == "change=1": found_addr_chg += 1 break elif keytype == "reserve=1": found_addr_rsv += 1 break # count scripts for script_addr in script_addrs: if script_addr == addr.rstrip() and keytype == "script=1": found_script_addr += 1 break return found_comments, found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret class WalletDumpTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [["-keypool=90"]] self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.add_nodes(self.num_nodes, extra_args=self.extra_args) self.start_nodes() def run_test(self): wallet_unenc_dump = os.path.join( self.nodes[0].datadir, "wallet.unencrypted.dump") wallet_enc_dump = os.path.join( self.nodes[0].datadir, "wallet.encrypted.dump") # generate 20 addresses to compare against the dump test_addr_count = 20 addrs = [] for i in range(0, test_addr_count): addr = self.nodes[0].getnewaddress() vaddr = self.nodes[0].getaddressinfo( addr) # required to get hd keypath addrs.append(vaddr) # Should be a no-op: self.nodes[0].keypoolrefill() # Test scripts dump by adding a 1-of-1 multisig address multisig_addr = self.nodes[0].addmultisigaddress( 1, [addrs[0]["address"]])["address"] self.log.info('Mine a block one second before the wallet is dumped') dump_time = int(time.time()) self.nodes[0].setmocktime(dump_time - 1) self.nodes[0].generate(1) self.nodes[0].setmocktime(dump_time) dump_time_str = '# * Created on {}Z'.format( datetime.datetime.fromtimestamp( dump_time, tz=datetime.timezone.utc, ).replace(tzinfo=None).isoformat()) dump_best_block_1 = '# * Best block at time of backup was {} ({}),'.format( self.nodes[0].getblockcount(), self.nodes[0].getbestblockhash(), ) dump_best_block_2 = '# mined on {}Z'.format( datetime.datetime.fromtimestamp( dump_time - 1, tz=datetime.timezone.utc, ).replace(tzinfo=None).isoformat()) self.log.info('Dump unencrypted wallet') result = self.nodes[0].dumpwallet(wallet_unenc_dump) assert_equal(result['filename'], wallet_unenc_dump) found_comments, found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \ read_dump(wallet_unenc_dump, addrs, 
[multisig_addr], None) # Check that file is not corrupt assert '# End of dump' in found_comments assert_equal( dump_time_str, next( c for c in found_comments if c.startswith('# * Created on'))) assert_equal( dump_best_block_1, next( c for c in found_comments if c.startswith('# * Best block'))) assert_equal( dump_best_block_2, next( c for c in found_comments if c.startswith('# mined on'))) # all keys must be in the dump assert_equal(found_addr, test_addr_count) # all scripts must be in the dump assert_equal(found_script_addr, 1) # 0 blocks were mined assert_equal(found_addr_chg, 0) # 90 keys plus 100% internal keys assert_equal(found_addr_rsv, 90 * 2) # encrypt wallet, restart, unlock and dump self.nodes[0].encryptwallet('test') self.nodes[0].walletpassphrase('test', 10) # Should be a no-op: self.nodes[0].keypoolrefill() self.nodes[0].dumpwallet(wallet_enc_dump) found_comments, found_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \ read_dump( wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc) # Check that file is not corrupt assert '# End of dump' in found_comments assert_equal(found_addr, test_addr_count) assert_equal( dump_time_str, next( c for c in found_comments if c.startswith('# * Created on'))) assert_equal( dump_best_block_1, next( c for c in found_comments if c.startswith('# * Best block'))) assert_equal( dump_best_block_2, next( c for c in found_comments if c.startswith('# mined on'))) assert_equal(found_script_addr, 1) # old reserve keys are marked as change now assert_equal(found_addr_chg, 90 * 2) assert_equal(found_addr_rsv, 90 * 2) # Overwriting should fail assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump)) # Restart node with new wallet, and test importwallet - self.stop_node(0) - self.start_node(0, ['-wallet=w2']) + self.restart_node(0, ['-wallet=w2']) # Make sure the address is not IsMine before import result = self.nodes[0].getaddressinfo(multisig_addr) assert result['ismine'] is False self.nodes[0].importwallet(wallet_unenc_dump) # Now check IsMine is true result = self.nodes[0].getaddressinfo(multisig_addr) assert result['ismine'] is True self.log.info('Check that wallet is flushed') with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20): self.nodes[0].getnewaddress() if __name__ == '__main__': WalletDumpTest().main() diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py index 773984ff4..91a7f2bc5 100755 --- a/test/functional/wallet_hd.py +++ b/test/functional/wallet_hd.py @@ -1,321 +1,319 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function.""" import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, assert_raises_rpc_error ) class WalletHDTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ['-keypool=0']] self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Make sure we use hd, keep masterkeyid masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] assert_equal(len(masterkeyid), 40) # create an internal key change_addr = self.nodes[1].getrawchangeaddress() change_addrV = self.nodes[1].getaddressinfo(change_addr) # first internal child key assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") # Import a non-HD private key in the HD wallet non_hd_add = self.nodes[0].getnewaddress() self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add)) # This should be enough to keep the master key and the non-HD key self.nodes[1].backupwallet( os.path.join(self.nodes[1].datadir, "hd.bak")) # self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, "hd.dump")) # Derive some HD addresses and remember the last # Also send funds to each add self.nodes[0].generate(101) hd_add = None NUM_HD_ADDS = 10 for i in range(NUM_HD_ADDS): hd_add = self.nodes[1].getnewaddress() hd_info = self.nodes[1].getaddressinfo(hd_add) assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i) + "'") assert_equal(hd_info["hdseedid"], masterkeyid) self.nodes[0].sendtoaddress(hd_add, 1) self.nodes[0].generate(1) self.nodes[0].sendtoaddress(non_hd_add, 1) self.nodes[0].generate(1) # create an internal key (again) change_addr = self.nodes[1].getrawchangeaddress() change_addrV = self.nodes[1].getaddressinfo(change_addr) # second internal child key assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") self.sync_all() assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) self.log.info("Restore backup ...") self.stop_node(1) # we need to delete the complete regtest directory # otherwise node1 would auto-recover all funds in flag the keypool keys # as used shutil.rmtree( os.path.join( self.nodes[1].datadir, self.chain, "blocks")) shutil.rmtree(os.path.join( self.nodes[1].datadir, self.chain, "chainstate")) shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join( self.nodes[1].datadir, self.chain, "wallets", "wallet.dat")) self.start_node(1) # Assert that derivation is deterministic hd_add_2 = None for i in range(NUM_HD_ADDS): hd_add_2 = self.nodes[1].getnewaddress() hd_info_2 = self.nodes[1].getaddressinfo(hd_add_2) assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(i) + "'") assert_equal(hd_info_2["hdseedid"], masterkeyid) assert_equal(hd_add, hd_add_2) connect_nodes(self.nodes[0], self.nodes[1]) self.sync_all() # Needs rescan - self.stop_node(1) - self.start_node(1, extra_args=self.extra_args[1] + ['-rescan']) + self.restart_node(1, extra_args=self.extra_args[1] + ['-rescan']) assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) # Try a RPC based rescan self.stop_node(1) shutil.rmtree( os.path.join( self.nodes[1].datadir, self.chain, "blocks")) shutil.rmtree(os.path.join( self.nodes[1].datadir, self.chain, "chainstate")) shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join( self.nodes[1].datadir, self.chain, "wallets", "wallet.dat")) self.start_node(1, extra_args=self.extra_args[1]) connect_nodes(self.nodes[0], self.nodes[1]) 
self.sync_all() # Wallet automatically scans blocks older than its keys on startup assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) out = self.nodes[1].rescanblockchain(0, 1) assert_equal(out['start_height'], 0) assert_equal(out['stop_height'], 1) out = self.nodes[1].rescanblockchain(2, 4) assert_equal(out['start_height'], 2) assert_equal(out['stop_height'], 4) out = self.nodes[1].rescanblockchain(3) assert_equal(out['start_height'], 3) assert_equal(out['stop_height'], self.nodes[1].getblockcount()) out = self.nodes[1].rescanblockchain() assert_equal(out['start_height'], 0) assert_equal(out['stop_height'], self.nodes[1].getblockcount()) assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) # Send a tx and make sure it's using the internal chain for the # change output txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) outs = self.nodes[1].decoderawtransaction( self.nodes[1].gettransaction(txid)['hex'])['vout'] keypath = "" for out in outs: if out['value'] != 1: keypath = self.nodes[1].getaddressinfo( out['scriptPubKey']['addresses'][0])['hdkeypath'] assert_equal(keypath[0:7], "m/0'/1'") # Generate a new HD seed on node 1 and make sure it is set orig_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] self.nodes[1].sethdseed() new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] assert orig_masterkeyid != new_masterkeyid addr = self.nodes[1].getnewaddress() # Make sure the new address is the first from the keypool assert_equal(self.nodes[1].getaddressinfo( addr)['hdkeypath'], 'm/0\'/0\'/0\'') # Fill keypool with 1 key self.nodes[1].keypoolrefill(1) # Set a new HD seed on node 1 without flushing the keypool new_seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress()) orig_masterkeyid = new_masterkeyid self.nodes[1].sethdseed(False, new_seed) new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] assert orig_masterkeyid != new_masterkeyid addr = self.nodes[1].getnewaddress() assert_equal(orig_masterkeyid, self.nodes[1].getaddressinfo( addr)['hdseedid']) # Make sure the new address continues the previous keypool assert_equal(self.nodes[1].getaddressinfo( addr)['hdkeypath'], 'm/0\'/0\'/1\'') # Check that the next address is from the new seed self.nodes[1].keypoolrefill(1) next_addr = self.nodes[1].getnewaddress() assert_equal(new_masterkeyid, self.nodes[1].getaddressinfo( next_addr)['hdseedid']) # Make sure the new address is not from the previous keypool assert_equal(self.nodes[1].getaddressinfo( next_addr)['hdkeypath'], 'm/0\'/0\'/0\'') assert next_addr != addr # Sethdseed parameter validity assert_raises_rpc_error(-1, 'sethdseed', self.nodes[0].sethdseed, False, new_seed, 0) assert_raises_rpc_error(-5, "Invalid private key", self.nodes[1].sethdseed, False, "not_wif") assert_raises_rpc_error(-1, "JSON value is not a boolean as expected", self.nodes[1].sethdseed, "Not_bool") assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[1].sethdseed, False, True) assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed) assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress())) self.log.info( 'Test sethdseed restoring with keys outside of the initial keypool') self.nodes[0].generate(10) # Restart node 1 with keypool of 3 and a different wallet self.nodes[1].createwallet(wallet_name='origin', blank=True) - self.stop_node(1) - self.start_node(1, extra_args=['-keypool=3', '-wallet=origin']) + self.restart_node(1,
extra_args=['-keypool=3', '-wallet=origin']) connect_nodes(self.nodes[0], self.nodes[1]) # sethdseed restoring and seeing txs to addresses out of the # keypool origin_rpc = self.nodes[1].get_wallet_rpc('origin') seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress()) origin_rpc.sethdseed(True, seed) self.nodes[1].createwallet(wallet_name='restore', blank=True) restore_rpc = self.nodes[1].get_wallet_rpc('restore') # Set to be the same seed as origin_rpc restore_rpc.sethdseed(True, seed) # Rotate to a new seed, making original `seed` inactive restore_rpc.sethdseed(True) self.nodes[1].createwallet(wallet_name='restore2', blank=True) restore2_rpc = self.nodes[1].get_wallet_rpc('restore2') # Set to be the same seed as origin_rpc restore2_rpc.sethdseed(True, seed) # Rotate to a new seed, making original `seed` inactive restore2_rpc.sethdseed(True) # Check persistence of inactive seed by reloading restore. restore2 # is still loaded to test the case where the wallet is not reloaded restore_rpc.unloadwallet() self.nodes[1].loadwallet('restore') restore_rpc = self.nodes[1].get_wallet_rpc('restore') # Empty origin keypool and get an address that is beyond the # initial keypool origin_rpc.getnewaddress() origin_rpc.getnewaddress() # Last address of initial keypool last_addr = origin_rpc.getnewaddress() # First address beyond initial keypool addr = origin_rpc.getnewaddress() # Check that the restored seed has last_addr but does not have addr info = restore_rpc.getaddressinfo(last_addr) assert_equal(info['ismine'], True) info = restore_rpc.getaddressinfo(addr) assert_equal(info['ismine'], False) info = restore2_rpc.getaddressinfo(last_addr) assert_equal(info['ismine'], True) info = restore2_rpc.getaddressinfo(addr) assert_equal(info['ismine'], False) # Check that the origin seed has addr info = origin_rpc.getaddressinfo(addr) assert_equal(info['ismine'], True) # Send a transaction to addr, which is out of the initial keypool. # The wallet that has set a new seed (restore_rpc) should not # detect this transaction. txid = self.nodes[0].sendtoaddress(addr, 1) origin_rpc.sendrawtransaction( self.nodes[0].gettransaction(txid)['hex']) self.nodes[0].generate(1) self.sync_blocks() origin_rpc.gettransaction(txid) assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, txid) out_of_kp_txid = txid # Send a transaction to last_addr, which is in the initial keypool. # The wallet that has set a new seed (restore_rpc) should detect this # transaction and generate 3 new keys from the initial seed. # The previous transaction (out_of_kp_txid) should still not be # detected as a rescan is required. txid = self.nodes[0].sendtoaddress(last_addr, 1) origin_rpc.sendrawtransaction( self.nodes[0].gettransaction(txid)['hex']) self.nodes[0].generate(1) self.sync_blocks() origin_rpc.gettransaction(txid) restore_rpc.gettransaction(txid) assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, out_of_kp_txid) restore2_rpc.gettransaction(txid) assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore2_rpc.gettransaction, out_of_kp_txid) # After rescanning, restore_rpc should now see out_of_kp_txid and generate an additional key. 
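# (rescanblockchain re-checks the whole chain against the wallet's current
# key set, which is why out_of_kp_txid can become visible below without any
# new block arriving.)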
# addr should now be part of restore_rpc and be ismine restore_rpc.rescanblockchain() restore_rpc.gettransaction(out_of_kp_txid) info = restore_rpc.getaddressinfo(addr) assert_equal(info['ismine'], True) restore2_rpc.rescanblockchain() restore2_rpc.gettransaction(out_of_kp_txid) info = restore2_rpc.getaddressinfo(addr) assert_equal(info['ismine'], True) # Check again that 3 keys were derived. # Empty keypool and get an address that is beyond the initial # keypool origin_rpc.getnewaddress() origin_rpc.getnewaddress() last_addr = origin_rpc.getnewaddress() addr = origin_rpc.getnewaddress() # Check that the restored seed has last_addr but does not have addr info = restore_rpc.getaddressinfo(last_addr) assert_equal(info['ismine'], True) info = restore_rpc.getaddressinfo(addr) assert_equal(info['ismine'], False) info = restore2_rpc.getaddressinfo(last_addr) assert_equal(info['ismine'], True) info = restore2_rpc.getaddressinfo(addr) assert_equal(info['ismine'], False) if __name__ == '__main__': WalletHDTest().main() diff --git a/test/functional/wallet_reorgsrestore.py b/test/functional/wallet_reorgsrestore.py index 0bde1171b..7da94242d 100755 --- a/test/functional/wallet_reorgsrestore.py +++ b/test/functional/wallet_reorgsrestore.py @@ -1,128 +1,127 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test tx status in case of reorgs while the wallet is shut down. Wallet txn status relies on block connection/disconnection for its accuracy. If a reorg happens while the wallet is shut down, those block updates are never received. At wallet load, we therefore check against the chain whether confirmed txns are still in the chain, and change their status if the block in which they were included has been disconnected. """ from decimal import Decimal import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, disconnect_nodes, ) class ReorgsRestoreTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 3 self.extra_args = [["-noparkdeepreorg"]] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Send a tx whose outputs we will conflict later txid_conflict_from = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) self.nodes[0].generate(1) self.sync_blocks() # Disconnect node1 from others to reorg its chain later disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[1], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[2]) # Send a tx to be unconfirmed later txid = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) tx = self.nodes[0].gettransaction(txid) self.nodes[0].generate(4) tx_before_reorg = self.nodes[0].gettransaction(txid) assert_equal(tx_before_reorg["confirmations"], 4) # Disconnect node0 from node2 to broadcast a conflict on their # respective chains disconnect_nodes(self.nodes[0], self.nodes[2]) nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10")) inputs = [] inputs.append({"txid": txid_conflict_from, "vout": nA}) outputs_1 = {} outputs_2 = {} # Create a conflicted tx broadcast on node0 chain and conflicting tx # broadcast on node2 chain.
Both spend from txid_conflict_from outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998") outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998") conflicted = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs_1)) conflicting = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs_2)) conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"]) self.nodes[0].generate(1) conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"]) self.nodes[2].generate(9) # Reconnect node0 and node2 and check that conflicted_txid is # effectively conflicted connect_nodes(self.nodes[0], self.nodes[2]) self.sync_blocks([self.nodes[0], self.nodes[2]]) conflicted = self.nodes[0].gettransaction(conflicted_txid) conflicting = self.nodes[0].gettransaction(conflicting_txid) assert_equal(conflicted["confirmations"], -9) assert_equal(conflicted["walletconflicts"][0], conflicting["txid"]) # Node0 wallet is shut down - self.stop_node(0) - self.start_node(0) + self.restart_node(0) # The blockchain reorgs and the tx is included in a different block self.nodes[1].generate(9) self.nodes[1].sendrawtransaction(tx["hex"]) self.nodes[1].generate(1) self.nodes[1].sendrawtransaction(conflicted["hex"]) self.nodes[1].generate(1) # Node0 wallet file is loaded on the longest-synced node1 self.stop_node(1) self.nodes[0].backupwallet( os.path.join( self.nodes[0].datadir, 'wallet.bak')) shutil.copyfile( os.path.join( self.nodes[0].datadir, 'wallet.bak'), os.path.join( self.nodes[1].datadir, self.chain, 'wallet.dat')) self.start_node(1) tx_after_reorg = self.nodes[1].gettransaction(txid) # Check that the normal confirmed tx is confirmed again but with a # different blockhash assert_equal(tx_after_reorg["confirmations"], 2) assert(tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"]) conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid) # Check that the conflicted tx is confirmed again with a blockhash # different from the previously conflicting tx assert_equal(conflicted_after_reorg["confirmations"], 1) assert(conflicting["blockhash"] != conflicted_after_reorg["blockhash"]) if __name__ == '__main__': ReorgsRestoreTest().main() diff --git a/test/functional/wallet_zapwallettxes.py b/test/functional/wallet_zapwallettxes.py index 572c09052..329ff8df2 100755 --- a/test/functional/wallet_zapwallettxes.py +++ b/test/functional/wallet_zapwallettxes.py @@ -1,90 +1,87 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the zapwallettxes functionality. - start two bitcoind nodes - create two transactions on node 0 - one is confirmed and one is unconfirmed. - restart node 0 and verify that both the confirmed and the unconfirmed transactions are still available. - restart node 0 with zapwallettxes and persistmempool, and verify that both the confirmed and the unconfirmed transactions are still available. - restart node 0 with just zapwallettxes and verify that the confirmed transactions are still available, but that the unconfirmed transaction has been zapped.
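(-zapwallettxes=2, used below, also drops transaction metadata; mode 1 would keep it. Both modes delete the wallet transactions themselves.)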
""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, wait_until, ) class ZapWalletTXesTest (BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(1) self.sync_all() self.nodes[1].generate(100) self.sync_all() # This transaction will be confirmed txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10) self.nodes[0].generate(1) self.sync_all() # This transaction will not be confirmed txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20) # Confirmed and unconfirmed transactions are now in the wallet. assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) - # Stop-start node0. Both confirmed and unconfirmed transactions remain + # Restart node0. Both confirmed and unconfirmed transactions remain # in the wallet. - self.stop_node(0) - self.start_node(0) + self.restart_node(0) assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) - # Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed + # Restart node0 with zapwallettxes and persistmempool. The unconfirmed # transaction is zapped from the wallet, but is re-added when the # mempool is reloaded. - self.stop_node(0) - self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"]) + self.restart_node(0, ["-persistmempool=1", "-zapwallettxes=2"]) wait_until(lambda: self.nodes[0].getmempoolinfo()[ 'size'] == 1, timeout=3) # Flush mempool to wallet self.nodes[0].syncwithvalidationinterfacequeue() assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) - # Stop node0 and restart with zapwallettxes, but not persistmempool. + # Restart node0 with zapwallettxes, but not persistmempool. # The unconfirmed transaction is zapped and is no longer in the wallet. - self.stop_node(0) - self.start_node(0, ["-zapwallettxes=2"]) + self.restart_node(0, ["-zapwallettxes=2"]) # tx1 is still be available because it was confirmed assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) # This will raise an exception because the unconfirmed transaction has # been zapped assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2) if __name__ == '__main__': ZapWalletTXesTest().main()