diff --git a/test/functional/example_test.py b/test/functional/example_test.py
index 030b98dd8..b856752a0 100755
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -1,216 +1,216 @@
#!/usr/bin/env python3
-# Copyright (c) 2017 The Bitcoin Core developers
+# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test

The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested.
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict

# Avoid wildcard * imports if possible
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import (CInv, msg_block, msg_getdata)
from test_framework.mininode import (
    P2PInterface,
    mininode_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    connect_nodes,
    wait_until,
)

# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.


class BaseNode(P2PInterface):
    def __init__(self):
        """Initialize the P2PInterface

        Used to initialize custom properties for the Node that aren't
        included by default in the base class. Be aware that the P2PInterface
        base class already stores a counter for each P2P message type and the
        last received message of each type, which should be sufficient for
        the needs of most tests.

        Call super().__init__() first for standard initialization and then
        initialize custom properties."""
        super().__init__()
        # Stores a dictionary of all blocks received
        self.block_receive_map = defaultdict(int)

    def on_block(self, message):
        """Override the standard on_block callback

        Store the hash of a received block in the dictionary."""
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1

    def on_inv(self, message):
        """Override the standard on_inv callback"""
        pass


def custom_function():
    """Do some custom behaviour

    If this function is more generally useful for other tests, consider
    moving it to a module in test_framework."""
    # self.log.info("running custom_function")  # Oops! Can't run self.log
    # outside the BitcoinTestFramework
    pass


class ExampleTest(BitcoinTestFramework):
    # Each functional test is a subclass of the BitcoinTestFramework class.
    # Override the set_test_params(), add_options(), setup_chain(),
    # setup_network() and setup_nodes() methods to customize the test setup
    # as required.

    def set_test_params(self):
        """Override test parameters for your individual test.

        This method must be overridden and num_nodes must be explicitly set."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        # Use self.extra_args to change command-line arguments for the nodes
        self.extra_args = [[], ["-logips"], []]

        # self.log.info("I've finished set_test_params")  # Oops! Can't run
        # self.log before run_test()

    # Use add_options() to add specific command-line options for your test.
    # In practice this is not used very much, since the tests are mostly
    # written to be run in automated environments without command-line
    # options.
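    # A hypothetical override might look like the sketch below (the
    # "--custom-option" name and its behaviour are illustrative, not part of
    # this test):
    #
    # def add_options(self, parser):
    #     parser.add_argument("--custom-option", dest="custom_option",
    #                         action="store_true", default=False,
    #                         help="Enable some custom behaviour in the test")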
    # def add_options()
    #     pass

    # Use setup_chain() to customize the node data directories. In practice
    # this is not used very much since the default behaviour is almost always
    # fine.
    # def setup_chain():
    #     pass

    def setup_network(self):
        """Setup the test network topology

        Often you won't need to override this, since the standard network
        topology (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most
        tests.

        If you do override this method, remember to start the nodes, assign
        them to self.nodes, connect them and then sync."""

        self.setup_nodes()

        # In this test, we're not connecting node2 to node0 or node1. Calls to
        # sync_all() should not include node2, since we're not expecting it to
        # sync.
        connect_nodes(self.nodes[0], self.nodes[1])
        self.sync_all([self.nodes[0:2]])

    # Use setup_nodes() to customize the node start behaviour (for example if
    # you don't want to start all nodes at the start of the test).
    # def setup_nodes():
    #     pass

    def custom_method(self):
        """Do some custom behaviour for this test

        Define it in a method here because you're going to use it repeatedly.
        If you think it's useful in general, consider moving it to the base
        BitcoinTestFramework class so other tests can use it."""

        self.log.info("Running custom_method")

    def run_test(self):
        """Main test logic"""

        # Create a P2P connection. add_p2p_connection() waits for a verack,
        # so the connection is fully established before it returns.
        self.nodes[0].add_p2p_connection(BaseNode())

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:2]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.
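        # A minimal sketch of that dispatch pattern (illustrative only, not
        # the framework's actual implementation; "rpc_connection" is a
        # stand-in name):
        #
        # class NodeProxy:
        #     def __getattr__(self, name):
        #         # Only called for attributes that aren't found normally.
        #         def rpc_call(*args, **kwargs):
        #             return self.rpc_connection.call(name, *args, **kwargs)
        #         return rpc_call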
        # Logs are nice. Do plenty of them. They can be used in place of
        # comments for breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1
        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually
            # build a block. Calling the generate() rpc is easier, but this
            # allows us to exactly control the blocks and transactions.
            block = create_block(
                self.tip, create_coinbase(height), self.block_time)
            block.solve()
            block_message = msg_block(block)
            # send_message() delivers a P2P message to the node over our
            # P2PInterface connection
            self.nodes[0].p2p.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], self.nodes[2])

        self.log.info("Add P2P connection to node2")
        self.nodes[0].disconnect_p2ps()
        self.nodes[2].add_p2p_connection(BaseNode())

        self.log.info(
            "Wait for node2 to reach current tip. Test that it has propagated all the blocks to us")
        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        self.nodes[2].p2p.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to
        # test properties of the P2PInterface objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the
        # P2PConnection objects when sending and receiving messages. The test
        # thread should acquire the global lock before accessing any
        # P2PConnection data to avoid locking and synchronization issues.
        # Note that wait_until() acquires this global lock when testing the
        # predicate.
        with mininode_lock:
            for block in self.nodes[2].p2p.block_receive_map.values():
                assert_equal(block, 1)


if __name__ == '__main__':
    ExampleTest().main()

diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
index c804b1d53..b99faf84f 100755
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -1,204 +1,204 @@
#!/usr/bin/env python3
-# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for skipping signature validation on old blocks.

Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/bitcoin/bitcoin/pull/9484)

We build a chain that includes an invalid signature for one of the
transactions:

    0:        genesis block
    1:        block 1 with coinbase transaction output.
    2-101:    bury that block with 100 blocks so the coinbase transaction
              output can be spent
    102:      a block containing a transaction spending the coinbase
              transaction output. The transaction has an invalid signature.
    103-2202: bury the bad block with just over two weeks' worth of blocks
              (2100 blocks)

Start three nodes:

    - node0 has no -assumevalid parameter. Try to sync to block 2202. It will
      reject block 102 and only sync as far as block 101
    - node1 has -assumevalid set to the hash of block 102. Try to sync to
      block 2202. node1 will sync all the way to block 2202.
    - node2 has -assumevalid set to the hash of block 102. Try to sync to
      block 200. node2 will reject block 102 even though it's assumed valid,
      because it isn't buried by at least two weeks' work.
"""
import time

from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.key import CECKey
from test_framework.messages import (
    CBlockHeader,
    COutPoint,
    CTransaction,
    CTxIn,
    CTxOut,
    msg_block,
    msg_headers,
)
from test_framework.mininode import P2PInterface
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal


class BaseNode(P2PInterface):
    def send_header_for_blocks(self, new_blocks):
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)


class AssumeValidTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3

    def setup_network(self):
        self.add_nodes(3)
        # Start node0. We don't start the other nodes yet since
        # we need to pre-mine a block with an invalid transaction
        # signature so we can pass in the block hash as assumevalid.
        self.start_node(0)
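    # Note on the "two weeks" in the docstring: at the 10-minute target
    # spacing, two weeks of blocks is 14 * 144 = 2016 blocks, so the 2100
    # blocks mined on top of block 102 are just over two weeks' worth of work.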
    def send_blocks_until_disconnected(self, p2p_conn):
        """Keep sending blocks to the node until we're disconnected."""
        for i in range(len(self.blocks)):
            if not p2p_conn.is_connected:
                break
            try:
                p2p_conn.send_message(msg_block(self.blocks[i]))
            except IOError:
                assert not p2p_conn.is_connected
                break

    def assert_blockchain_height(self, node, height):
        """Wait until the blockchain is no longer advancing and verify it's
        reached the expected height."""
        last_height = node.getblock(node.getbestblockhash())['height']
        timeout = 10
        while True:
            time.sleep(0.25)
            current_height = node.getblock(node.getbestblockhash())['height']
            if current_height != last_height:
                last_height = current_height
                if timeout < 0:
                    assert False, "blockchain too short after timeout: {}".format(
                        current_height)
                timeout -= 0.25
                continue
            elif current_height > height:
                assert False, "blockchain too long: {}".format(current_height)
            elif current_height == height:
                break

    def run_test(self):
        p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

        # Build the blockchain
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        self.blocks = []

        # Get a pubkey for the coinbase TXO
        coinbase_key = CECKey()
        coinbase_key.set_secretbytes(b"horsebattery")
        coinbase_pubkey = coinbase_key.get_pubkey()

        # Create the first block with a coinbase output to our key
        height = 1
        block = create_block(self.tip, create_coinbase(
            height, coinbase_pubkey), self.block_time)
        self.blocks.append(block)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1

        # Bury the block 100 deep so the coinbase output is spendable
        for i in range(100):
            block = create_block(
                self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # Create a transaction spending the coinbase output with an invalid
        # (null) signature
        tx = CTransaction()
        tx.vin.append(
            CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
        pad_tx(tx)
        tx.calc_sha256()

        block102 = create_block(
            self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block102.vtx.extend([tx])
        block102.hashMerkleRoot = block102.calc_merkle_root()
        block102.rehash()
        block102.solve()
        self.blocks.append(block102)
        self.tip = block102.sha256
        self.block_time += 1
        height += 1

        # Bury the assumed valid block 2100 deep
        for i in range(2100):
            block = create_block(
                self.tip, create_coinbase(height), self.block_time)
            block.nVersion = 4
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        self.nodes[0].disconnect_p2ps()

        # Start node1 and node2 with assumevalid so they accept a block with
        # a bad signature.
        self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
        self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])

        p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
        p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
        p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

        # Send header lists to all three nodes
        p2p0.send_header_for_blocks(self.blocks[0:2000])
        p2p0.send_header_for_blocks(self.blocks[2000:])
        p2p1.send_header_for_blocks(self.blocks[0:2000])
        p2p1.send_header_for_blocks(self.blocks[2000:])
        p2p2.send_header_for_blocks(self.blocks[0:200])
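        # A single P2P headers message carries at most 2000 headers
        # (MAX_HEADERS_RESULTS in the C++ code), so the 2202-block chain has
        # to be announced in two batches.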
        # Send blocks to node0. Block 102 will be rejected.
        self.send_blocks_until_disconnected(p2p0)
        self.assert_blockchain_height(self.nodes[0], 101)

        # Send all blocks to node1. All blocks will be accepted.
        for i in range(2202):
            p2p1.send_message(msg_block(self.blocks[i]))
        # Syncing 2200 blocks can take a while on slow systems. Give it
        # plenty of time to sync.
        p2p1.sync_with_ping(120)
        assert_equal(self.nodes[1].getblock(
            self.nodes[1].getbestblockhash())['height'], 2202)

        # Send blocks to node2. Block 102 will be rejected.
        self.send_blocks_until_disconnected(p2p2)
        self.assert_blockchain_height(self.nodes[2], 101)


if __name__ == '__main__':
    AssumeValidTest().main()

diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index b62505679..8c070ea7e 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -1,495 +1,495 @@
#!/usr/bin/env python3
-# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""

import time

from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import (
    create_block,
    create_coinbase,
)
from test_framework.messages import (
    COIN,
    COutPoint,
    CTransaction,
    CTxIn,
    CTxOut,
    FromHex,
    ToHex,
)
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import (
    assert_equal,
    assert_greater_than,
    assert_raises_rpc_error,
    connect_nodes,
    disconnect_nodes,
    satoshi_round,
    sync_blocks,
)

SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31)
# this means use time (0 means height)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22)
# this is a bit-shift
SEQUENCE_LOCKTIME_GRANULARITY = 9
SEQUENCE_LOCKTIME_MASK = 0x0000ffff

# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final (code 64)"


class BIP68Test(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        self.extra_args = [["-blockprioritypercentage=0",
                            "-noparkdeepreorg",
                            "-maxreorgdepth=-1"],
                           ["-blockprioritypercentage=0",
                            "-acceptnonstdtxn=0",
                            "-maxreorgdepth=-1"]]

    def run_test(self):
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        # Generate some coins
        self.nodes[0].generate(110)

        self.log.info("Running test disable flag")
        self.test_disable_flag()

        self.log.info("Running test sequence-lock-confirmed-inputs")
        self.test_sequence_lock_confirmed_inputs()

        self.log.info("Running test sequence-lock-unconfirmed-inputs")
        self.test_sequence_lock_unconfirmed_inputs()

        self.log.info(
            "Running test BIP68 not consensus before versionbits activation")
        self.test_bip68_not_consensus()

        self.log.info("Verifying nVersion=2 transactions aren't standard")
        self.test_version2_relay(before_activation=True)

        self.log.info("Activating BIP68 (and 112/113)")
        self.activateCSV()

        self.log.info("Verifying nVersion=2 transactions are now standard")
        self.test_version2_relay(before_activation=False)

        self.log.info("Passed")

    # Test that BIP68 is not in effect if tx version is 1, or if
    # the first sequence bit is set.
    def test_disable_flag(self):
        # Create some unconfirmed inputs
        new_addr = self.nodes[0].getnewaddress()
        # send 2 BCH
        self.nodes[0].sendtoaddress(new_addr, 2)

        utxos = self.nodes[0].listunspent(0, 0)
        assert len(utxos) > 0

        utxo = utxos[0]

        tx1 = CTransaction()
        value = int(satoshi_round(utxo["amount"] - self.relayfee) * COIN)

        # Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the # input to mature. sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [ CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] pad_tx(tx1) tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))[ "hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) # This transaction will enable sequence-locks, so this transaction should # fail tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx2) tx2.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. tx2.nVersion = 1 self.nodes[0].sendrawtransaction(ToHex(tx2)) # Calculate the median time past of a prior block ("confirmations" before # the current tip). def get_median_time_past(self, confirmations): block_hash = self.nodes[0].getblockhash( self.nodes[0].getblockcount() - confirmations) return self.nodes[0].getblockheader(block_hash)["mediantime"] # Test that sequence locks are respected for transactions spending # confirmed inputs. def test_sequence_lock_confirmed_inputs(self): # Create lots of confirmed utxos, and use them to generate lots of random # transactions. max_outputs = 50 addresses = [] while len(addresses) < max_outputs: addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: import random random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} for i in range(num_outputs): outputs[addresses[i]] = random.randint(1, 20) * 0.01 self.nodes[0].sendmany("", outputs) self.nodes[0].generate(1) utxos = self.nodes[0].listunspent() # Try creating a lot of random transactions. # Each time, choose a random number of inputs, and randomly set # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. for i in range(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) # Track whether any sequence locks used should fail should_pass = True # Track whether this transaction was built with sequence locks using_sequence_locks = False tx = CTransaction() tx.nVersion = 2 value = 0 for j in range(num_inputs): # this disables sequence locks sequence_value = 0xfffffffe # 50% chance we enable sequence locks if random.randint(0, 1): using_sequence_locks = True # 10% of the time, make the input sequence value pass input_will_pass = (random.randint(1, 10) == 1) sequence_value = utxos[j]["confirmations"] if not input_will_pass: sequence_value += 1 should_pass = False # Figure out what the median-time-past was for the confirmed input # Note that if an input has N confirmations, we're going back N blocks # from the tip so that we're looking up MTP of the block # PRIOR to the one the input appears in, as per the BIP68 # spec. 
orig_time = self.get_median_time_past( utxos[j]["confirmations"]) # MTP of the tip cur_time = self.get_median_time_past(0) # can only timelock this input if it's not too old -- # otherwise use height can_time_lock = True if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time # lock if random.randint(0, 1) and can_time_lock: # Find first time-lock value that fails, or latest one # that succeeds time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) elif (not input_will_pass and time_delta <= cur_time - orig_time): sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append( CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"] * COIN # Overestimate the size of the tx - signatures should be less than # 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append( CTxOut(int(value - self.relayfee * tx_size * COIN / 1000), CScript([b'a']))) rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))[ "hex"] if (using_sequence_locks and not should_pass): # This transaction should be rejected assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) else: # This raw transaction should be accepted self.nodes[0].sendrawtransaction(rawtx) utxos = self.nodes[0].listunspent() # Test that sequence locks on unconfirmed inputs must have nSequence # height or time of 0 to be accepted. # Then test that BIP68-invalid transactions are removed from the mempool # after a reorg. def test_sequence_lock_unconfirmed_inputs(self): # Store height so we can easily reset the chain at the end of the test cur_height = self.nodes[0].getblockcount() # Create a mempool tx. txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # As the fees are calculated prior to the transaction being signed, # there is some uncertainty that calculate fee provides the correct # minimal fee. Since regtest coins are free, let's go ahead and # increase the fee by an order of magnitude to ensure this test # passes. fee_multiplier = 10 # Anyone-can-spend mempool tx. # Sequence lock of 0 should pass. tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(0), CScript([b'a']))] tx2.vout[0].nValue = tx1.vout[0].nValue - \ fee_multiplier * self.nodes[0].calculate_fee(tx2) tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) # Create a spend of the 0th output of orig_tx with a sequence lock # of 1, and test what happens when submitting. 
# orig_tx.vout[0] must be an anyone-can-spend output def test_nonzero_locks(orig_tx, node, use_height_lock): sequence_value = 1 if not use_height_lock: sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx = CTransaction() tx.nVersion = 2 tx.vin = [ CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [ CTxOut(int(orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx)), CScript([b'a']))] pad_tx(tx) tx.rehash() if (orig_tx.hash in node.getrawmempool()): # sendrawtransaction should fail if the tx is in the mempool assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) else: # sendrawtransaction should succeed if the tx is not in the mempool node.sendrawtransaction(ToHex(tx)) return tx test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction( tx2.hash, -1e15, -fee_multiplier * self.nodes[0].calculate_fee(tx2)) cur_time = int(time.time()) for i in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 assert tx2.hash in self.nodes[0].getrawmempool() test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction( tx2.hash, 1e15, fee_multiplier * self.nodes[0].calculate_fee(tx2)) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) assert tx2.hash not in self.nodes[0].getrawmempool() # Now that tx2 is not in the mempool, a sequence locked spend should # succeed tx3 = test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) assert tx3.hash in self.nodes[0].getrawmempool() self.nodes[0].generate(1) assert tx3.hash not in self.nodes[0].getrawmempool() # One more test, this time using height locks tx4 = test_nonzero_locks( tx3, self.nodes[0], use_height_lock=True) assert tx4.hash in self.nodes[0].getrawmempool() # Now try combining confirmed and unconfirmed inputs tx5 = test_nonzero_locks( tx4, self.nodes[0], use_height_lock=True) assert tx5.hash not in self.nodes[0].getrawmempool() utxos = self.nodes[0].listunspent() tx5.vin.append( CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"] * COIN) raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"] assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) # Test mempool-BIP68 consistency after reorg # # State of the transactions in the last blocks: # ... -> [ tx2 ] -> [ tx3 ] # tip-1 tip # And currently tx4 is in the mempool. # # If we invalidate the tip, tx3 should get added to the mempool, causing # tx4 to be removed (fails sequence-lock). self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) assert tx4.hash not in self.nodes[0].getrawmempool() assert tx3.hash in self.nodes[0].getrawmempool() # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. 
tip = int(self.nodes[0].getblockhash( self.nodes[0].getblockcount() - 1), 16) height = self.nodes[0].getblockcount() for i in range(2): block = create_block(tip, create_coinbase(height), cur_time) block.nVersion = 3 block.rehash() block.solve() tip = block.sha256 height += 1 self.nodes[0].submitblock(ToHex(block)) cur_time += 1 mempool = self.nodes[0].getrawmempool() assert tx3.hash not in mempool assert tx2.hash in mempool # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) self.nodes[0].invalidateblock( self.nodes[0].getblockhash(cur_height + 1)) self.nodes[0].generate(10) def get_csv_status(self): height = self.nodes[0].getblockchaininfo()['blocks'] return height >= 576 # Make sure that BIP68 isn't being used to validate blocks, prior to # versionbits activation. If more blocks are mined prior to this test # being run, then it's possible the test has activated the soft fork, and # this test should be moved to run earlier, or deleted. def test_bip68_not_consensus(self): assert_equal(self.get_csv_status(), False) txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] # sign tx2 tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) pad_tx(tx2) tx2.rehash() self.nodes[0].sendrawtransaction(ToHex(tx2)) # Now make an invalid spend of tx2 according to BIP68 # 100 block relative locktime sequence_value = 100 tx3 = CTransaction() tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [ CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx3) tx3.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) block = create_block( tip, create_coinbase(self.nodes[0].getblockcount() + 1)) block.nVersion = 3 block.vtx.extend( sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id())) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].submitblock(ToHex(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) def activateCSV(self): # activation should happen at block height 576 csv_activation_height = 576 height = self.nodes[0].getblockcount() assert_greater_than(csv_activation_height - height, 1) self.nodes[0].generate(csv_activation_height - height - 1) assert_equal(self.get_csv_status(), False) disconnect_nodes(self.nodes[0], self.nodes[1]) self.nodes[0].generate(1) assert_equal(self.get_csv_status(), True) # We have a block that has CSV activated, but we want to be at # the activation point, so we invalidate the tip. 
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        connect_nodes(self.nodes[0], self.nodes[1])
        sync_blocks(self.nodes)

    # Use self.nodes[1] to test standardness relay policy
    def test_version2_relay(self, before_activation):
        inputs = []
        outputs = {self.nodes[1].getnewaddress(): 1.0}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
        tx = FromHex(CTransaction(), rawtxfund)
        tx.nVersion = 2
        tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))[
            "hex"]
        # Catch only RPC errors; a bare except would also swallow the
        # AssertionError raised when relay unexpectedly succeeds.
        try:
            self.nodes[1].sendrawtransaction(tx_signed)
            assert not before_activation
        except JSONRPCException:
            assert before_activation


if __name__ == '__main__':
    BIP68Test().main()

diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 80ac48718..491efd880 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -1,223 +1,223 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).

Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block
height 1351.
"""

from test_framework.blocktools import create_block, create_coinbase, make_conform_to_ctor
from test_framework.messages import (
    CTransaction,
    FromHex,
    msg_block,
    msg_tx,
    ToHex,
)
from test_framework.mininode import (
    P2PInterface,
)
from test_framework.script import (
    CScript,
    CScriptNum,
    OP_1NEGATE,
    OP_CHECKLOCKTIMEVERIFY,
    OP_DROP,
    OP_TRUE,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal

CLTV_HEIGHT = 1351

# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64


def cltv_lock_to_height(node, tx, to_address, amount, height=-1):
    '''Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make
    a transaction that spends it.

    This transforms the output script to anyone-can-spend (OP_TRUE) if the
    lock time condition is valid.

    The default height of -1 causes the CLTV check to fail.

    TODO: test more ways that transactions using CLTV could be invalid (eg
    locktime requirements fail, sequence time requirements fail, etc).
''' height_op = OP_1NEGATE if(height > 0): tx.vin[0].nSequence = 0 tx.nLockTime = height height_op = CScriptNum(height) tx.vout[0].scriptPubKey = CScript( [height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE]) pad_tx(tx) fundtx_raw = node.signrawtransactionwithwallet(ToHex(tx))['hex'] fundtx = FromHex(CTransaction(), fundtx_raw) fundtx.rehash() # make spending tx inputs = [{ "txid": fundtx.hash, "vout": 0 }] output = {to_address: amount} spendtx_raw = node.createrawtransaction(inputs, output) spendtx = FromHex(CTransaction(), spendtx_raw) pad_tx(spendtx) return fundtx, spendtx def spend_from_coinbase(node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransactionwithwallet(rawtx) tx = FromHex(CTransaction(), signresult['hex']) return tx class BIP65Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [['-whitelist=127.0.0.1']] self.setup_clean_chain = True def run_test(self): self.nodes[0].add_p2p_connection(P2PInterface()) self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2)) self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) self.nodeaddress = self.nodes[0].getnewaddress() self.log.info( "Test that an invalid-according-to-CLTV transaction can still appear in a block") fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[0], self.nodeaddress, 49.99) fundtx, spendtx = cltv_lock_to_height( self.nodes[0], fundtx, self.nodeaddress, 49.98) tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block(int(tip, 16), create_coinbase( CLTV_HEIGHT - 1), block_time) block.nVersion = 3 block.vtx.append(fundtx) # include the -1 CLTV in block block.vtx.append(spendtx) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 4") tip = block.sha256 block_time += 1 block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) block.nVersion = 3 block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) self.nodes[0].p2p.sync_with_ping() self.log.info( "Test that invalid-according-to-cltv transactions cannot appear in a block") block.nVersion = 4 fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 49.99) fundtx, spendtx = cltv_lock_to_height( self.nodes[0], fundtx, self.nodeaddress, 49.98) # The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is valid. self.nodes[0].p2p.send_and_ping(msg_tx(fundtx)) assert fundtx.hash in self.nodes[0].getrawmempool() # Mine a block containing the funding transaction block.vtx.append(fundtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) # We show that this tx is invalid due to CLTV by getting it # rejected from the mempool for exactly that reason. 
assert_equal( [{'txid': spendtx.hash, 'allowed': False, 'reject-reason': '64: non-mandatory-script-verify-flag (Negative locktime)'}], self.nodes[0].testmempoolaccept( rawtxs=[spendtx.serialize().hex()], allowhighfees=True) ) rejectedtx_signed = self.nodes[0].signrawtransactionwithwallet( ToHex(spendtx)) # Couldn't complete signature due to CLTV assert rejectedtx_signed['errors'][0]['error'] == 'Negative locktime' tip = block.hash block_time += 1 block = create_block( block.sha256, create_coinbase(CLTV_HEIGHT+1), block_time) block.nVersion = 4 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['ConnectBlock {} failed (blk-bad-inputs'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) self.nodes[0].p2p.sync_with_ping() self.log.info( "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[2], self.nodeaddress, 49.99) fundtx, spendtx = cltv_lock_to_height( self.nodes[0], fundtx, self.nodeaddress, 49.98, CLTV_HEIGHT) # make sure sequence is nonfinal and locktime is good spendtx.vin[0].nSequence = 0xfffffffe spendtx.nLockTime = CLTV_HEIGHT # both transactions are fully valid self.nodes[0].sendrawtransaction(ToHex(fundtx)) self.nodes[0].sendrawtransaction(ToHex(spendtx)) # Modify the transactions in the block to be valid against CLTV block.vtx.pop(1) block.vtx.append(fundtx) block.vtx.append(spendtx) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is now valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) if __name__ == '__main__': BIP65Test().main() diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index 842ae8b08..e968d3c06 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -1,59 +1,59 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test various command line arguments and configuration file parameters.""" import os from test_framework.test_framework import BitcoinTestFramework class ConfArgsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): self.stop_node(0) # Remove the -datadir argument so it doesn't override the config file self.nodes[0].remove_default_args(["-datadir"]) default_data_dir = self.nodes[0].datadir new_data_dir = os.path.join(default_data_dir, 'newdatadir') new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2') # Check that using -datadir argument on non-existent directory fails self.nodes[0].datadir = new_data_dir self.nodes[0].assert_start_raises_init_error( ['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.') # Check that using non-existent datadir in conf file fails conf_file = os.path.join(default_data_dir, "bitcoin.conf") # datadir needs to be set before [regtest] section conf_file_contents = open(conf_file, encoding='utf8').read() with open(conf_file, 'w', encoding='utf8') as f: f.write("datadir=" + new_data_dir + "\n") f.write(conf_file_contents) self.nodes[0].assert_start_raises_init_error( ['-conf=' + conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.') # Create the directory and ensure the config file now works os.mkdir(new_data_dir) self.start_node(0, ['-conf='+conf_file, '-wallet=w1']) self.stop_node(0) assert os.path.exists(os.path.join( new_data_dir, 'regtest', 'wallets', 'w1')) # Ensure command line argument overrides datadir in conf os.mkdir(new_data_dir_2) self.nodes[0].datadir = new_data_dir_2 self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2']) assert os.path.exists(os.path.join( new_data_dir_2, 'regtest', 'wallets', 'w2')) if __name__ == '__main__': ConfArgsTest().main() diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index 6a0a6a0b4..edc0953ba 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -1,663 +1,663 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test activation of the first version bits soft fork. 
This soft fork will activate the following BIPS: BIP 68 - nSequence relative lock times BIP 112 - CHECKSEQUENCEVERIFY BIP 113 - MedianTimePast semantics for nLockTime regtest lock-in with 108/144 block signalling activation after a further 144 blocks mine 82 blocks whose coinbases will be used to generate inputs for our tests mine 489 blocks and seed block chain with the 82 inputs will use for our tests at height 572 mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered mine 1 block and test that enforcement has triggered (which triggers ACTIVE) Test BIP 113 is enforced Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height Mine 1 block so next height is 581 and test BIP 68 now passes time but not height Mine 1 block so next height is 582 and test BIP 68 now passes time and height Test that BIP 112 is enforced Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates And that after the soft fork activates transactions pass and fail as they should according to the rules. For each BIP, transactions of versions 1 and 2 will be tested. ---------------- BIP 113: bip113tx - modify the nLocktime variable BIP 68: bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below BIP 112: bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112tx_special - test negative argument to OP_CSV """ from decimal import Decimal from itertools import product import time from test_framework.blocktools import ( create_block, create_coinbase, make_conform_to_ctor, ) from test_framework.messages import COIN, CTransaction, FromHex, ToHex from test_framework.mininode import P2PDataStore from test_framework.script import ( CScript, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal BASE_RELATIVE_LOCKTIME = 10 SEQ_DISABLE_FLAG = 1 << 31 SEQ_RANDOM_HIGH_BIT = 1 << 25 SEQ_TYPE_FLAG = 1 << 22 SEQ_RANDOM_LOW_BIT = 1 << 18 def relative_locktime(sdf, srhb, stf, srlb): """Returns a locktime with certain bits set.""" locktime = BASE_RELATIVE_LOCKTIME if sdf: locktime |= SEQ_DISABLE_FLAG if srhb: locktime |= SEQ_RANDOM_HIGH_BIT if stf: locktime |= SEQ_TYPE_FLAG if srlb: locktime |= SEQ_RANDOM_LOW_BIT return locktime def all_rlt_txs(txs): return [tx['tx'] for tx in txs] def get_csv_status(node): height = node.getblockchaininfo()['blocks'] return height >= 576 def create_transaction(node, txid, to_address, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) tx = FromHex(CTransaction(), rawtx) return tx def sign_transaction(node, unsignedtx): rawtx = ToHex(unsignedtx) signresult = node.signrawtransactionwithwallet(rawtx) tx = FromHex(CTransaction(), signresult['hex']) return tx def spend_tx(node, prev_tx, address): spendtx = create_transaction( node, prev_tx.hash, address, (prev_tx.vout[0].nValue - 1000) / COIN) spendtx.nVersion = prev_tx.nVersion 
pad_tx(spendtx) spendtx.rehash() return spendtx def create_bip112special(node, input, txversion, address): tx = create_transaction( node, input, address, Decimal("49.98")) tx.nVersion = txversion tx.vout[0].scriptPubKey = CScript( [-1, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) tx.rehash() signtx = sign_transaction(node, tx) signtx.rehash() return signtx def send_generic_input_tx(node, coinbases, address): amount = Decimal("49.99") return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount)))) def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0): """Returns a list of bip68 transactions with different bits set.""" txs = [] assert len(bip68inputs) >= 16 for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): locktime = relative_locktime(sdf, srhb, stf, srlb) tx = create_transaction( node, bip68inputs[i], address, Decimal("49.98")) tx.nVersion = txversion tx.vin[0].nSequence = locktime + locktime_delta tx = sign_transaction(node, tx) tx.rehash() txs.append({'tx': tx, 'sdf': sdf, 'stf': stf}) return txs def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0): """Returns a list of bip112 transactions with different bits set.""" txs = [] assert len(bip112inputs) >= 16 for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): locktime = relative_locktime(sdf, srhb, stf, srlb) tx = create_transaction( node, bip112inputs[i], address, Decimal("49.98")) if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta else: # vary nSequence instead, OP_CSV is fixed tx.vin[0].nSequence = locktime + locktime_delta tx.nVersion = txversion if (varyOP_CSV): tx.vout[0].scriptPubKey = CScript( [locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) else: tx.vout[0].scriptPubKey = CScript( [BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) tx.rehash() signtx = sign_transaction(node, tx) signtx.rehash() txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf}) return txs class BIP68_112_113Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4']] def generate_blocks(self, number): test_blocks = [] for i in range(number): block = self.create_test_block([]) test_blocks.append(block) self.last_block_time += 600 self.tip = block.sha256 self.tipheight += 1 return test_blocks def create_test_block(self, txs, version=536870912): block = create_block(self.tip, create_coinbase( self.tipheight + 1), self.last_block_time + 600) block.nVersion = version block.vtx.extend(txs) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() return block # Create a block with given txs, and spend these txs in the same block. # Spending utxos in the same block is OK as long as nSequence is not enforced. # Otherwise a number of intermediate blocks should be generated, and this # method should not be used. def create_test_block_spend_utxos(self, node, txs, version=536870912): block = self.create_test_block(txs, version) block.vtx.extend([spend_tx(node, tx, self.nodeaddress) for tx in txs]) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() return block def sync_blocks(self, blocks, success=True): """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. 
Call with success = False if the tip shouldn't advance to the most recent block.""" self.nodes[0].p2p.send_blocks_and_test( blocks, self.nodes[0], success=success) def run_test(self): self.nodes[0].add_p2p_connection(P2PDataStore()) self.log.info("Generate blocks in the past for coinbase outputs.") # Enough to build up to 1000 blocks 10 minutes apart without worrying # about getting into the future long_past_time = int(time.time()) - 600 * 1000 # Enough so that the generated blocks will still all be before long_past_time self.nodes[0].setmocktime(long_past_time - 100) # 82 blocks generated for inputs self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # Set time back to present so yielded blocks aren't in the future as # we advance last_block_time self.nodes[0].setmocktime(0) # height of the next block to build self.tipheight = 82 self.last_block_time = long_past_time self.tip = int(self.nodes[0].getbestblockhash(), 16) self.nodeaddress = self.nodes[0].getnewaddress() # CSV is not activated yet. assert_equal(get_csv_status(self.nodes[0]), False) # Generate 489 more version 4 blocks test_blocks = self.generate_blocks(489) # Test #1 self.sync_blocks(test_blocks) # Still not activated. assert_equal(get_csv_status(self.nodes[0]), False) # Inputs at height = 572 # # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] for i in range(16): bip68inputs.append(send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress)) # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112basicinputs = [] for j in range(2): inputs = [] for i in range(16): inputs.append(send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112basicinputs.append(inputs) # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112diverseinputs = [] for j in range(2): inputs = [] for i in range(16): inputs.append(send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112diverseinputs.append(inputs) # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112specialinput = send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress) # 1 normal input bip113input = send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress) self.nodes[0].setmocktime(self.last_block_time + 600) # 1 block generated for inputs to be in chain at height 572 inputblockhash = self.nodes[0].generate(1)[0] self.nodes[0].setmocktime(0) self.tip = int(inputblockhash, 16) self.tipheight += 1 self.last_block_time += 600 assert_equal(len(self.nodes[0].getblock( inputblockhash, True)["tx"]), 82 + 1) # 2 more version 4 blocks test_blocks = self.generate_blocks(2) # Test #2 self.sync_blocks(test_blocks) self.log.info( "Not yet activated, height = 574 (will activate for block 576, not 575)") assert_equal(get_csv_status(self.nodes[0]), False) # Test both version 1 and version 2 transactions for all tests # BIP113 test transaction will be modified before each use to # put in appropriate block time bip113tx_v1 = create_transaction( self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE bip113tx_v1.nVersion = 1 bip113tx_v2 = create_transaction( self.nodes[0], bip113input, self.nodeaddress, 
Decimal("49.98")) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 # For BIP68 test all 16 relative sequence locktimes bip68txs_v1 = create_bip68txs( self.nodes[0], bip68inputs, 1, self.nodeaddress) bip68txs_v2 = create_bip68txs( self.nodes[0], bip68inputs, 2, self.nodeaddress) # For BIP112 test: # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_v1 = create_bip112txs( self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress) bip112txs_vary_nSequence_v2 = create_bip112txs( self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress) # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_9_v1 = create_bip112txs( self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1) bip112txs_vary_nSequence_9_v2 = create_bip112txs( self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1) # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs bip112txs_vary_OP_CSV_v1 = create_bip112txs( self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress) bip112txs_vary_OP_CSV_v2 = create_bip112txs( self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress) # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs bip112txs_vary_OP_CSV_9_v1 = create_bip112txs( self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1) bip112txs_vary_OP_CSV_9_v2 = create_bip112txs( self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1) # -1 OP_CSV OP_DROP input bip112tx_special_v1 = create_bip112special( self.nodes[0], bip112specialinput, 1, self.nodeaddress) bip112tx_special_v2 = create_bip112special( self.nodes[0], bip112specialinput, 2, self.nodeaddress) self.log.info("TESTING") self.log.info("Pre-Soft Fork Tests. 
All txs should pass.") self.log.info("Test version 1 txs") success_txs = [] # add BIP113 tx and -1 CSV tx # = MTP of prior block (not <) but < time put on current block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) success_txs.append( spend_tx(self.nodes[0], bip112tx_special_v1, self.nodeaddress)) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v1)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_v1)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v1)]) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v1)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)]) # Test #3 self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Test version 2 txs") success_txs = [] # add BIP113 tx and -1 CSV tx # = MTP of prior block (not <) but < time put on current block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) success_txs.append( spend_tx(self.nodes[0], bip112tx_special_v2, self.nodeaddress)) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v2)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_v2)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v2)]) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v2)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)]) # Test #4 self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # 1 more version 4 block to get us to height 575 so the fork should # now be active for the next block test_blocks = self.generate_blocks(1) # Test #5 self.sync_blocks(test_blocks) assert_equal(get_csv_status(self.nodes[0]), False) self.nodes[0].generate(1) assert_equal(get_csv_status(self.nodes[0]), True) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Post-Soft Fork Tests.") self.log.info("BIP 113 tests") # BIP 113 tests should now fail regardless of version number # if nLockTime isn't satisfied by new rules # = MTP of prior block (not <) but < time put on current block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) # = MTP of prior block (not <) 
but < time put on current block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: # Test #6, Test #7 self.sync_blocks( [self.create_test_block([bip113tx])], success=False) # BIP 113 tests should now pass if the locktime is < MTP # < MTP of prior block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) # < MTP of prior block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: # Test #8, Test #9 self.sync_blocks([self.create_test_block([bip113tx])]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Next block height = 580 after 4 blocks of random version test_blocks = self.generate_blocks(4) # Test #10 self.sync_blocks(test_blocks) self.log.info("BIP 68 tests") self.log.info("Test version 1 txs - all should still pass") success_txs = [] success_txs.extend(all_rlt_txs(bip68txs_v1)) # Test #11 self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Test version 2 txs") # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']] self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # All txs without flag fail as we are at delta height = 8 < 10 and # delta time = 8 * 600 < 10 * 512 bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']] for tx in bip68timetxs: # Test #13 - Test #16 self.sync_blocks([self.create_test_block([tx])], success=False) bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']] for tx in bip68heighttxs: # Test #17 - Test #20 self.sync_blocks([self.create_test_block([tx])], success=False) # Advance one block to 581 test_blocks = self.generate_blocks(1) # Test #21 self.sync_blocks(test_blocks,) # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512 bip68success_txs.extend(bip68timetxs) # Test #22 self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) for tx in bip68heighttxs: # Test #23 - Test #26 self.sync_blocks([self.create_test_block([tx])], success=False) # Advance one block to 582 test_blocks = self.generate_blocks(1) # Test #27 self.sync_blocks(test_blocks) # All BIP 68 txs should pass bip68success_txs.extend(bip68heighttxs) # Test #28 self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("BIP 112 tests") self.log.info("Test version 1 txs") # -1 OP_CSV tx should fail # Test #29 self.sync_blocks([self.create_test_block_spend_utxos( self.nodes[0], [bip112tx_special_v1])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 1 txs should still pass success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']] success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']] # Test #30 self.sync_blocks( [self.create_test_block_spend_utxos(self.nodes[0], success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, # version 1 txs should now fail fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1) 
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV,
        # version 1 txs should now fail
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
        fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
        fail_txs += [tx['tx']
                     for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
        fail_txs += [tx['tx']
                     for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
        for tx in fail_txs:
            # Test #31 - Test #78
            self.sync_blocks([self.create_test_block_spend_utxos(
                self.nodes[0], [tx])], success=False)

        self.log.info("Test version 2 txs")

        # -1 OP_CSV tx should fail
        # Test #79
        self.sync_blocks([self.create_test_block_spend_utxos(
            self.nodes[0], [bip112tx_special_v2])], success=False)

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV,
        # version 2 txs should pass
        success_txs = [tx['tx']
                       for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
        success_txs += [tx['tx']
                        for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]
        # Test #80
        self.sync_blocks(
            [self.create_test_block_spend_utxos(self.nodes[0], success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for
        # all remaining txs

        # All txs with nSequence 9 should fail either due to earlier mismatch
        # or failing the CSV check
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
        fail_txs += [tx['tx']
                     for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
        for tx in fail_txs:
            # Test #81 - Test #104
            self.sync_blocks([self.create_test_block_spend_utxos(
                self.nodes[0], [tx])], success=False)

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
        for tx in fail_txs:
            # Test #105 - Test #112
            self.sync_blocks([self.create_test_block_spend_utxos(
                self.nodes[0], [tx])], success=False)

        # If sequencelock types mismatch, tx should fail
        fail_txs = [tx['tx']
                    for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
        fail_txs += [tx['tx']
                     for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
        for tx in fail_txs:
            # Test #113 - Test #120
            self.sync_blocks([self.create_test_block_spend_utxos(
                self.nodes[0], [tx])], success=False)

        # Remaining txs should pass, just test masking works properly
        success_txs = [
            tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
        success_txs += [tx['tx']
                        for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
        # Test #121
        self.sync_blocks([self.create_test_block(success_txs)])

        # Spending the previous block utxos requires a difference of 10 blocks
        # (nSequence = 10). Generate 9 blocks then spend in the 10th
        block = self.nodes[0].getbestblockhash()
        self.last_block_time += 600
        self.tip = int("0x" + block, 0)
        self.tipheight += 1
        # Test #122
        self.sync_blocks(self.generate_blocks(9))

        spend_txs = []
        for tx in success_txs:
            raw_tx = spend_tx(self.nodes[0], tx, self.nodeaddress)
            raw_tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME
            raw_tx.rehash()
            spend_txs.append(raw_tx)
        # Test #123
        self.sync_blocks([self.create_test_block(spend_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Additional test: check that comparison of two time types works
        # properly
        time_txs = []
        for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
            signtx = sign_transaction(self.nodes[0], tx)
            time_txs.append(signtx)
        # Test #124
        self.sync_blocks([self.create_test_block(time_txs)])
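
# A minimal sketch (hypothetical helper, not the test's own templates) of the
# kind of output script the vary_OP_CSV transactions exercise: the committed
# value is checked against the spending input's nSequence by
# OP_CHECKSEQUENCEVERIFY, which is why the spend_txs above must set
# vin[0].nSequence to a satisfying value (same type flag, value >= committed).
from test_framework.script import (CScript, OP_CHECKSEQUENCEVERIFY, OP_DROP,
                                   OP_TRUE)


def csv_anyone_can_spend_script(relative_locktime):
    # <locktime> OP_CHECKSEQUENCEVERIFY OP_DROP OP_TRUE
    return CScript([relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP,
                    OP_TRUE])
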
        # Spending the previous block utxos requires a block time difference
        # of at least 10 * 512s (nSequence = 10).
        # Generate 8 blocks then spend in the 9th (9 * 600 > 10 * 512)
        block = self.nodes[0].getbestblockhash()
        self.last_block_time += 600
        self.tip = int("0x" + block, 0)
        self.tipheight += 1
        # Test #125
        self.sync_blocks(self.generate_blocks(8))

        spend_txs = []
        for tx in time_txs:
            raw_tx = spend_tx(self.nodes[0], tx, self.nodeaddress)
            raw_tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
            raw_tx.rehash()
            spend_txs.append(raw_tx)
        # Test #126
        self.sync_blocks([self.create_test_block(spend_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # TODO: Test empty stack fails


if __name__ == '__main__':
    BIP68_112_113Test().main()
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index 2dc06ff57..fd7ad5e62 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -1,133 +1,133 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).

Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""

from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CTransaction, FromHex, msg_block, ToHex
from test_framework.mininode import (
    mininode_lock,
    P2PInterface,
)
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until

DERSIG_HEIGHT = 1251

# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64

# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>


def unDERify(tx):
    """
    Make the signature in vin 0 of a tx non-DER-compliant,
    by adding padding after the S-value.
""" scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): newscript.append(i[0:-1] + b'\0' + i[-1:]) else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) def create_transaction(node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransactionwithwallet(rawtx) return FromHex(CTransaction(), signresult['hex']) class BIP66Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [['-whitelist=127.0.0.1', '-enablebip61']] self.setup_clean_chain = True def run_test(self): self.nodes[0].add_p2p_connection(P2PInterface()) self.log.info("Mining {} blocks".format(DERSIG_HEIGHT - 1)) self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 1) self.nodeaddress = self.nodes[0].getnewaddress() self.log.info("Test that blocks must now be at least version 3") tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block( int(tip, 16), create_coinbase(DERSIG_HEIGHT), block_time) block.nVersion = 2 block.rehash() block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) self.nodes[0].p2p.sync_with_ping() self.log.info( "Test that transactions with non-DER signatures cannot appear in a block") block.nVersion = 3 spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) unDERify(spendtx) spendtx.rehash() # First we show that this tx is valid except for DERSIG by getting it # rejected from the mempool for exactly that reason. assert_equal( [{'txid': spendtx.hash, 'allowed': False, 'reject-reason': '16: mandatory-script-verify-flag-failed (Non-canonical DER signature)'}], self.nodes[0].testmempoolaccept( rawtxs=[ToHex(spendtx)], allowhighfees=True) ) # Now we verify that a block with this transaction is also invalid. 
block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['ConnectBlock {} failed (blk-bad-inputs'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) self.nodes[0].p2p.sync_with_ping() wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: assert self.nodes[0].p2p.last_message["reject"].code in [ REJECT_INVALID, REJECT_NONSTANDARD] assert_equal( self.nodes[0].p2p.last_message["reject"].data, block.sha256) assert b'blk-bad-inputs' in self.nodes[0].p2p.last_message["reject"].reason self.log.info( "Test that a version 3 block with a DERSIG-compliant transaction is accepted") block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) if __name__ == '__main__': BIP66Test().main() diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index bdf3cf693..3e320dc87 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -1,95 +1,95 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2018 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the -alertnotify, -blocknotify and -walletnotify options.""" import os from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, connect_nodes_bi, wait_until FORK_WARNING_MESSAGE = "Warning: Large-work fork detected, forking after block {}\n" class NotificationsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self): self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt") self.tx_filename = os.path.join( self.options.tmpdir, "transactions.txt") # -alertnotify and -blocknotify on node0, walletnotify on node1 self.extra_args = [["-blockversion=2", "-alertnotify=echo %s >> {}".format( self.alert_filename), "-blocknotify=echo %s >> {}".format(self.block_filename)], ["-blockversion=211", "-rescan", "-walletnotify=echo %s >> {}".format(self.tx_filename)]] super().setup_network() def run_test(self): self.log.info("test -blocknotify") block_count = 10 blocks = self.nodes[1].generate(block_count) # wait at most 10 seconds for expected file size before reading the content wait_until(lambda: os.path.isfile(self.block_filename) and os.stat( self.block_filename).st_size >= (block_count * 65), timeout=10) # file content should equal the generated blocks hashes with open(self.block_filename, 'r', encoding="utf-8") as f: assert_equal(sorted(blocks), sorted(f.read().splitlines())) self.log.info("test -walletnotify") # wait at most 10 seconds for expected file size before reading the content wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat( self.tx_filename).st_size >= (block_count * 65), timeout=10) # file content should equal the generated transaction hashes txids_rpc = list( map(lambda t: t['txid'], 
self.nodes[1].listtransactions("*", block_count))) with open(self.tx_filename, 'r', encoding="ascii") as f: assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) os.remove(self.tx_filename) self.log.info("test -walletnotify after rescan") # restart node to rescan to force wallet notifications self.restart_node(1) connect_nodes_bi(self.nodes[0], self.nodes[1]) wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat( self.tx_filename).st_size >= (block_count * 65), timeout=10) # file content should equal the generated transaction hashes txids_rpc = list( map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) with open(self.tx_filename, 'r', encoding="ascii") as f: assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) # Create an invalid chain and ensure the node warns. self.log.info("test -alertnotify for forked chain") fork_block = self.nodes[0].getbestblockhash() self.nodes[0].generate(1) invalid_block = self.nodes[0].getbestblockhash() self.nodes[0].generate(7) # Invalidate a large branch, which should trigger an alert. self.nodes[0].invalidateblock(invalid_block) # Give bitcoind 10 seconds to write the alert notification wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10) self.log.info(self.alert_filename) with open(self.alert_filename, 'r', encoding='utf8') as f: assert_equal(f.read(), (FORK_WARNING_MESSAGE.format(fork_block))) if __name__ == '__main__': NotificationsTest().main() diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index cb9b6ccb5..af3b3b581 100755 --- a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -1,124 +1,124 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test NULLDUMMY softfork. Connect to a single node. Generate 2 blocks (save the coinbases for later). Generate 427 more blocks. [Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. [Policy] Check that non-NULLDUMMY transactions are rejected before activation. [Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. [Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. """ import time from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import CTransaction, FromHex, ToHex from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error # This test checks for a reject reason that changes after the graviton # upgrade. Since the nulldummy effect and this test are destined to be removed # after the upgrade anyway, we run this test pre-upgrade only. # More detailed dummy tests can be found in abc-schnorrmultisig-activation.py. 
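
# The -blocknotify/-walletnotify checks in the notifications test above all
# follow one pattern: poll until the notify file holds the expected amount of
# data, then compare sorted contents. A framework-free sketch of that wait
# (hypothetical helper; 65 = 64 hex chars + newline per hash, matching the
# block_count * 65 bound above):
import os
import time


def wait_for_notify_file(path, expected_lines, line_size=65, timeout=10):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.isfile(path) and \
                os.stat(path).st_size >= expected_lines * line_size:
            with open(path, encoding='utf-8') as f:
                return sorted(f.read().splitlines())
        time.sleep(0.05)
    raise AssertionError('timed out waiting on {}'.format(path))
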
GRAVITON_START_TIME = 2000000000 NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero) (code 64)" def trueDummy(tx): scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): assert len(i) == 0 newscript.append(b'\x51') else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) tx.rehash() class NULLDUMMYTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=127.0.0.1', "-gravitonactivationtime={}".format(GRAVITON_START_TIME)]] def run_test(self): self.address = self.nodes[0].getnewaddress() self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])[ 'address'] # Block 2 self.coinbase_blocks = self.nodes[0].generate(2) coinbase_txid = [] for i in self.coinbase_blocks: coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) # Block 429 self.nodes[0].generate(427) self.lastblockhash = self.nodes[0].getbestblockhash() self.tip = int("0x" + self.lastblockhash, 0) self.lastblockheight = 429 self.lastblocktime = int(time.time()) + 429 self.log.info( "Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") test1txs = [self.create_transaction( self.nodes[0], coinbase_txid[0], self.ms_address, 49)] txid1 = self.nodes[0].sendrawtransaction(ToHex(test1txs[0]), True) test1txs.append(self.create_transaction( self.nodes[0], txid1, self.ms_address, 48)) txid2 = self.nodes[0].sendrawtransaction(ToHex(test1txs[1]), True) self.block_submit(self.nodes[0], test1txs, True) self.log.info( "Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") test2tx = self.create_transaction( self.nodes[0], txid2, self.ms_address, 48) trueDummy(test2tx) assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, ToHex(test2tx), True) self.log.info( "Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") self.block_submit(self.nodes[0], [test2tx], True) def create_transaction(self, node, txid, to_address, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransactionwithwallet(rawtx) return FromHex(CTransaction(), signresult['hex']) def block_submit(self, node, txs, accept=False): block = create_block(self.tip, create_coinbase( self.lastblockheight + 1), self.lastblocktime + 1) block.nVersion = 4 for tx in txs: tx.rehash() block.vtx.append(tx) block.vtx = [block.vtx[0]] + \ sorted(block.vtx[1:], key=lambda tx: tx.get_id()) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) if (accept): assert_equal(node.getbestblockhash(), block.hash) self.tip = block.sha256 self.lastblockhash = block.hash self.lastblocktime += 1 self.lastblockheight += 1 else: assert_equal(node.getbestblockhash(), self.lastblockhash) if __name__ == '__main__': NULLDUMMYTest().main() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 4e6b1abe1..b18122743 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -1,230 +1,230 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
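
# Background for the NULLDUMMY test above: CHECKMULTISIG pops one extra,
# unused stack element, and the NULLDUMMY rule requires that element to be
# the empty push. trueDummy replaces it with a 1-byte push of 0x51 (a
# non-null, "true" value), which trips the rule. A sketch of the two
# scriptSig shapes (sig_bytes is a placeholder, not a real signature):
from test_framework.script import CScript

sig_bytes = b'\x30' + b'\x00' * 70         # placeholder signature bytes
compliant = CScript([b'', sig_bytes])      # dummy is the empty vector: accepted
violating = CScript([b'\x51', sig_bytes])  # non-null dummy: NULLDUMMY_ERROR
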
"""Test bitcoind with different proxy configuration. Test plan: - Start bitcoind's with different proxy configurations - Use addnode to initiate connections - Verify that proxies are connected to, and the right connection command is given - Proxy configurations to test on bitcoind side: - `-proxy` (proxy everything) - `-onion` (proxy just onions) - `-proxyrandomize` Circuit randomization - Proxy configurations to test on proxy side, - support no authentication (other proxy) - support no authentication + user/pass authentication (Tor) - proxy on IPv6 - Create various proxies (as threads) - Create bitcoinds that connect to them - Manipulate the bitcoinds using addnode (onetry) an observe effects addnode connect to IPv4 addnode connect to IPv6 addnode connect to onion addnode connect to generic DNS name """ import os import socket from test_framework.netutil import test_ipv6_local from test_framework.socks5 import ( AddressType, Socks5Command, Socks5Configuration, Socks5Server, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, PORT_MIN, PORT_RANGE, ) RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports class ProxyTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 def setup_nodes(self): self.have_ipv6 = test_ipv6_local() # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() self.conf2.addr = ( '127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ... 
one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 self.conf3.addr = ( '::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) self.conf3.unauth = True self.conf3.auth = True else: self.log.warning("Testing without local IPv6 support") self.serv1 = Socks5Server(self.conf1) self.serv1.start() self.serv2 = Socks5Server(self.conf2) self.serv2.start() if self.have_ipv6: self.serv3 = Socks5Server(self.conf3) self.serv3.start() # Note: proxies are not used to connect to local nodes # this is because the proxy to use is based on CService.GetNetwork(), # which return NET_UNROUTABLE for localhost args = [ ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-proxyrandomize=1'], ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-onion={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=0'], ['-listen', '-proxy={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=1'], [] ] if self.have_ipv6: args[3] = ['-listen', '-proxy=[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1]), '-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() def node_test(self, node, proxies, auth, test_onion=True): rv = [] # Test: outgoing IPv4 connection through node node.addnode("15.61.23.23:1234", "onetry") cmd = proxies[0].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, # even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"15.61.23.23") assert_equal(cmd.port, 1234) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if self.have_ipv6: # Test: outgoing IPv6 connection through node node.addnode( "[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") cmd = proxies[1].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp # DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") assert_equal(cmd.port, 5443) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if test_onion: # Test: outgoing onion connection through node node.addnode("bitcoinostk4e4re.onion:8333", "onetry") cmd = proxies[2].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) # Test: outgoing DNS name connection through node node.addnode("node.noumenon:8333", "onetry") cmd = proxies[3].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"node.noumenon") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) return rv def run_test(self): # basic -proxy self.node_test( self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) # -proxy plus -onion self.node_test( self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) # -proxy plus -onion, -proxyrandomize rv = self.node_test( self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) # Check that credentials as used for -proxyrandomize connections are # unique 
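
# Context for the atyp assertions above, per RFC 1928: a SOCKS5 CONNECT
# request is VER CMD RSV ATYP ADDR PORT, where ATYP 0x01 is a raw IPv4
# address, 0x03 a length-prefixed domain name, and 0x04 a raw IPv6 address.
# bitcoind always sends 0x03, so the proxy sees hostnames, never raw
# addresses. A standalone parsing sketch (not the framework's Socks5Server):
import socket
import struct


def parse_socks5_connect(payload):
    ver, cmd, _rsv, atyp = payload[0], payload[1], payload[2], payload[3]
    assert ver == 0x05 and cmd == 0x01  # CONNECT
    if atyp == 0x01:    # IPv4: 4 raw bytes
        addr, rest = socket.inet_ntoa(payload[4:8]), payload[8:]
    elif atyp == 0x03:  # domain name: length byte, then the name
        n = payload[4]
        addr, rest = payload[5:5 + n].decode('ascii'), payload[5 + n:]
    elif atyp == 0x04:  # IPv6: 16 raw bytes
        addr = socket.inet_ntop(socket.AF_INET6, payload[4:20])
        rest = payload[20:]
    port, = struct.unpack('>H', rest[:2])  # port is big-endian
    return addr, port


assert parse_socks5_connect(
    b'\x05\x01\x00\x03\x0bexample.com\x04\xd2') == ('example.com', 1234)
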
credentials = set((x.username, x.password) for x in rv) assert_equal(len(credentials), len(rv)) if self.have_ipv6: # proxy on IPv6 localhost self.node_test( self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) def networks_dict(d): r = {} for x in d['networks']: r[x['name']] = x return r # test RPC getnetworkinfo n0 = networks_dict(self.nodes[0].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n0[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) n1 = networks_dict(self.nodes[1].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n1[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n2[net]['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n3[net]['proxy'], '[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1])) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) if __name__ == '__main__': ProxyTest().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 43a83aed3..bfb14b1f8 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -1,518 +1,518 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the pruning code. WARNING: This test uses 4GB of disk space. This test takes 30 mins or more (up to 2 hours) """ import os from test_framework.blocktools import mine_big_block from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, sync_blocks, wait_until, ) MIN_BLOCKS_TO_KEEP = 288 # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. TIMESTAMP_WINDOW = 2 * 60 * 60 def calc_usage(blockdir): return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. 
self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-prune=550"]] def setup_network(self): self.setup_nodes() self.prunedir = os.path.join( self.nodes[2].datadir, 'regtest', 'blocks', '') connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) connect_nodes(self.nodes[2], self.nodes[0]) connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[0], self.nodes[4]) sync_blocks(self.nodes[0:5]) def setup_nodes(self): self.add_nodes(self.num_nodes, self.extra_args, timewait=900) self.start_nodes() def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data for i in range(645): mine_big_block(self.nodes[0], self.utxo_cache_0) sync_blocks(self.nodes[0:5]) def test_height_min(self): if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")): raise AssertionError("blk00000.dat is missing, pruning too early") self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: {}".format( calc_usage(self.prunedir))) self.log.info( "Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full # blocks past the height cutoff will ensure this for i in range(25): mine_big_block(self.nodes[0], self.utxo_cache_0) # Wait for blk00000.dat to be pruned wait_until(lambda: not os.path.isfile( os.path.join(self.prunedir, "blk00000.dat")), timeout=30) self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) if (usage > 550): raise AssertionError("Pruning target not being met") def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks self.log.info( "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects # Stopping node 0 also clears its mempool, so it doesn't have # node1's transactions to accidentally mine self.stop_node(0) self.start_node(0, extra_args=self.full_node_default_args) # Mine 24 blocks in node 1 for i in range(24): if j == 0: mine_big_block(self.nodes[1], self.utxo_cache_1) else: # Add node1's wallet transactions back to the mempool, to # avoid the mined blocks from being too small. 
self.nodes[1].resendwallettransactions() # tx's already in mempool from previous disconnects self.nodes[1].generate(1) # Reorg back with 25 block chain from node 0 for i in range(25): mine_big_block(self.nodes[0], self.utxo_cache_0) # Create connections in the order so both nodes can see the reorg # at the same time connect_nodes(self.nodes[1], self.nodes[0]) connect_nodes(self.nodes[2], self.nodes[0]) sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: {}".format( calc_usage(self.prunedir))) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring # 288 blocks of undo data to the reorg_test chain. Reboot node 1 to # clear its mempool (hopefully make the invalidate faster). Lower the # block max size so we don't keep mining all our big mempool # transactions (from disconnected blocks) self.stop_node(1) self.start_node(1, extra_args=[ "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"]) height = self.nodes[1].getblockcount() self.log.info("Current block height: {}".format(height)) invalidheight = height - 287 badhash = self.nodes[1].getblockhash(invalidheight) self.log.info("Invalidating block {} at height {}".format( badhash, invalidheight)) self.nodes[1].invalidateblock(badhash) # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want. # So invalidate that fork as well, until we're on the same chain as # node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(invalidheight - 1) curhash = self.nodes[1].getblockhash(invalidheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(invalidheight - 1) assert self.nodes[1].getblockcount() == invalidheight - 1 self.log.info("New best height: {}".format( self.nodes[1].getblockcount())) # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) self.start_node(1, extra_args=[ "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[2], self.nodes[1]) sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: {}".format( self.nodes[2].getblockcount())) self.log.info("Usage possibly still high bc of stale blocks in block files: {}".format( calc_usage(self.prunedir))) self.log.info( "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") # Get node0's wallet transactions back in its mempool, to avoid the # mined blocks from being too small. self.nodes[0].resendwallettransactions() for i in range(22): # This can be slow, so do this in multiple RPC calls to avoid # RPC timeouts. 
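
# The invalidateblock loop in reorg_test above is a reusable pattern: keep
# invalidating whatever block the node has at a given height until it matches
# a target ancestor hash, i.e. "rewind this node onto a chosen chain". A
# sketch as a standalone helper (hypothetical name, not part of the
# framework):
def rewind_to_ancestor(node, height, ancestor_hash):
    while node.getblockhash(height) != ancestor_hash:
        node.invalidateblock(node.getblockhash(height))
    return node.getblockcount()
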
# node 0 has many large tx's in its mempool from the disconnects self.nodes[0].generate(10) sync_blocks(self.nodes[0:3], timeout=300) usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) if (usage > 550): raise AssertionError("Pruning target not being met") return invalidheight, badhash def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error( -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) self.log.info( "Will need to redownload block {}".format(self.forkheight)) # Verify that we have enough history to reorg back to the fork point. # Although this is more than 288 blocks, because this chain was written # more recently and only its other 299 small and 220 large blocks are in # the block files after it, it is expected to still be retained. self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to # the original chain created in create_chain_with_stale_blocks because # it doesn't know of any peer that's on that chain from which to # redownload its missing blocks. Invalidate the reorg_test chain in # node 0 as well, it can successfully switch to the original chain # because it has all the block data. However it must mine enough blocks # to have a more work chain than the reorg_test chain in order to # trigger node 2's block download logic. At this point node 2 is within # 288 blocks of the fork point so it will preserve its ability to # reorg. if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight self.log.info( "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {}".format( blocks_to_mine)) self.nodes[0].invalidateblock(curchainhash) assert self.nodes[0].getblockcount() == self.mainchainheight assert self.nodes[0].getbestblockhash() == self.mainchainhash2 goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info( "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") # Wait for Node 2 to reorg to proper height wait_until(lambda: self.nodes[2].getblockcount( ) >= goalbestheight, timeout=900) assert self.nodes[2].getbestblockhash() == goalbesthash # Verify we can now have the data for a block previously pruned assert self.nodes[2].getblock( self.forkhash)["height"] == self.forkheight def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode self.start_node(node_number) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) # now re-start in manual pruning mode self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=1"]) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW else: return index def prune(index, expected_ret=None): ret = node.pruneblockchain(height(index)) # Check the return value. 
When use_timestamp is True, just check # that the return value is less than or equal to the expected # value, because when more than one block is generated per second, # a timestamp will not be granular enough to uniquely identify an # individual block. if expected_ret is None: expected_ret = index if use_timestamp: assert_greater_than(ret, 0) assert_greater_than(expected_ret + 1, ret) else: assert_equal(ret, expected_ret) def has_block(index): return os.path.isfile(os.path.join(self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index))) # should not prune because chain tip of node 3 (995) < PruneAfterHeight # (1000) assert_raises_rpc_error( -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # negative heights should raise an exception assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # Does nothing node.pruneblockchain(height(0)) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # height=500 should prune first file prune(500) if has_block(0): raise AssertionError( "blk00000.dat is still there, should be pruned by now") if not has_block(1): raise AssertionError( "blk00001.dat is missing when should still be there") # height=650 should prune second file prune(650) if has_block(1): raise AssertionError( "blk00001.dat is still there, should be pruned by now") # height=1000 should not prune anything more, because tip-288 is in # blk00002.dat. prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) if not has_block(2): raise AssertionError( "blk00002.dat is still there, should be pruned by now") # advance the tip so blk00002.dat and blk00003.dat can be pruned (the # last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) if has_block(2): raise AssertionError( "blk00002.dat is still there, should be pruned by now") if has_block(3): raise AssertionError( "blk00003.dat is still there, should be pruned by now") # stop node, start back up with auto-prune at 550MB, make sure still # runs self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=550"]) self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape self.log.info("Stop and start pruning node to trigger wallet rescan") self.stop_node(2) self.start_node( 2, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], self.nodes[5]) nds = [self.nodes[0], self.nodes[5]] sync_blocks(nds, wait=5, timeout=300) self.stop_node(5) # stop and start to trigger rescan self.start_node( 5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") def run_test(self): self.log.info( "Warning! 
This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") self.log.info("Mining a big blockchain of 995 blocks") # Determine default relay fee self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Cache for utxos, as the listunspent may take a long time later in the # test self.utxo_cache_0 = [] self.utxo_cache_1 = [] self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning node with 995 blocks self.stop_node(3) self.stop_node(4) self.log.info( "Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) self.log.info( "Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() # 1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.log.info("Check that we can survive a 288 block reorg still") (self.forkheight, self.forkhash) = self.reorg_test() # (1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info( "Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. 
# # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main() diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py index 720a08560..62e9cd74a 100755 --- a/test/functional/interface_bitcoin_cli.py +++ b/test/functional/interface_bitcoin_cli.py @@ -1,95 +1,95 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test bitcoin-cli""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_process_error, get_auth_cookie, ) class TestBitcoinCli(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): """Main test logic""" cli_response = self.nodes[0].cli("-version").send_cli() assert "Bitcoin ABC RPC client version" in cli_response self.log.info( "Compare responses from gewalletinfo RPC and `bitcoin-cli getwalletinfo`") cli_response = self.nodes[0].cli.getwalletinfo() rpc_response = self.nodes[0].getwalletinfo() assert_equal(cli_response, rpc_response) self.log.info( "Compare responses from getblockchaininfo RPC and `bitcoin-cli getblockchaininfo`") cli_response = self.nodes[0].cli.getblockchaininfo() rpc_response = self.nodes[0].getblockchaininfo() assert_equal(cli_response, rpc_response) user, password = get_auth_cookie(self.nodes[0].datadir) self.log.info("Test -stdinrpcpass option") assert_equal(0, self.nodes[0].cli( '-rpcuser={}'.format(user), '-stdinrpcpass', input=password).getblockcount()) assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli( '-rpcuser={}'.format(user), '-stdinrpcpass', input="foo").echo) self.log.info("Test -stdin and -stdinrpcpass") assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo()) assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli( '-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input="foo").echo) self.log.info("Test connecting to a non-existing server") assert_raises_process_error( 1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo) self.log.info("Test connecting with non-existing RPC cookie file") assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli( '-rpccookiefile=does-not-exist', '-rpcpassword=').echo) self.log.info("Make sure that -getinfo with arguments fails") assert_raises_process_error( 1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help) self.log.info( "Compare responses from `bitcoin-cli -getinfo` and the RPCs data is retrieved from.") cli_get_info = self.nodes[0].cli('-getinfo').send_cli() wallet_info = self.nodes[0].getwalletinfo() network_info = self.nodes[0].getnetworkinfo() blockchain_info = self.nodes[0].getblockchaininfo() assert_equal(cli_get_info['version'], network_info['version']) assert_equal(cli_get_info['protocolversion'], network_info['protocolversion']) assert_equal(cli_get_info['walletversion'], 
wallet_info['walletversion']) assert_equal(cli_get_info['balance'], wallet_info['balance']) assert_equal(cli_get_info['blocks'], blockchain_info['blocks']) assert_equal(cli_get_info['timeoffset'], network_info['timeoffset']) assert_equal(cli_get_info['connections'], network_info['connections']) assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy']) assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty']) assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test") assert_equal(cli_get_info['balance'], wallet_info['balance']) assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest']) assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize']) assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee']) assert_equal(cli_get_info['relayfee'], network_info['relayfee']) # unlocked_until is not tested because the wallet is not encrypted if __name__ == '__main__': TestBitcoinCli().main() diff --git a/test/functional/interface_http.py b/test/functional/interface_http.py index 0c1817e90..4d1261a50 100755 --- a/test/functional/interface_http.py +++ b/test/functional/interface_http.py @@ -1,172 +1,172 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the RPC HTTP basics.""" import http.client import urllib.parse from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, str_to_b64str class HTTPBasicsTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 3 def setup_network(self): self.extra_args = [["-rpccorsdomain=null"], [], []] self.setup_nodes() def run_test(self): # # lowlevel check for http persistent connection # # url = urllib.parse.urlparse(self.nodes[0].url) authpair = url.username + ':' + url.password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert b'"error":null' in out1 assert conn.sock != None # according to http/1.1 connection must still be open! # send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert b'"error":null' in out1 # must also response with a correct json-rpc message assert conn.sock != None # according to http/1.1 connection must still be open! conn.close() # same should be if we add keep-alive because this should be the std. # behaviour headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert b'"error":null' in out1 assert conn.sock != None # according to http/1.1 connection must still be open! # send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert b'"error":null' in out1 # must also response with a correct json-rpc message assert conn.sock != None # according to http/1.1 connection must still be open! 
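
# The Authorization headers used throughout this test are plain HTTP Basic
# auth: base64 of "user:password" (str_to_b64str presumably wraps the same
# encoding). A standalone sketch:
import base64


def basic_auth_header(user, password):
    token = base64.b64encode('{}:{}'.format(user, password).encode('utf-8'))
    return {'Authorization': 'Basic ' + token.decode('ascii')}
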
conn.close() # now do the same with "Connection: close" headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "close"} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert b'"error":null' in out1 assert conn.sock == None # now the connection must be closed after the response # node1 (2nd node) is running with disabled keep-alive option urlNode1 = urllib.parse.urlparse(self.nodes[1].url) authpair = urlNode1.username + ':' + urlNode1.password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert b'"error":null' in out1 # node2 (third node) is running with standard keep-alive parameters # which means keep-alive is on urlNode2 = urllib.parse.urlparse(self.nodes[2].url) authpair = urlNode2.username + ':' + urlNode2.password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert b'"error":null' in out1 assert conn.sock != None # connection must be closed because bitcoind should use # keep-alive by default # Check excessive request size conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() conn.request('GET', '/' + ('x' * 1000), '', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.NOT_FOUND) conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() conn.request('GET', '/' + ('x' * 10000), '', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.BAD_REQUEST) # Check Standard CORS request origin = "null" conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() authpair = url.username + ':' + url.password headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Origin": origin} conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.OK) assert_equal(out1.headers["Access-Control-Allow-Origin"], origin) assert_equal(out1.headers["Access-Control-Allow-Credentials"], "true") assert_equal(out1.headers["Access-Control-Expose-Headers"], "WWW-Authenticate") assert b'"error":null' in out1.read() # Check Pre-flight CORS request corsheaders = {"Origin": origin, "Access-Control-Request-Method": "POST"} conn.request('OPTIONS', '/', None, corsheaders) out1 = conn.getresponse() assert_equal(out1.status, http.client.OK) assert_equal(out1.headers["Access-Control-Allow-Origin"], origin) assert_equal(out1.headers["Access-Control-Allow-Credentials"], "true") assert_equal(out1.headers["Access-Control-Allow-Methods"], "POST") assert_equal(out1.headers["Access-Control-Allow-Headers"], "authorization,content-type") assert_equal(b'', out1.read()) # Check Standard CORS request to node without CORS, expected failure conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() authpair = url.username + ':' + url.password headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Origin": origin} conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.UNAUTHORIZED) assert_equal(b'', 
out1.read()) # Check Pre-flight CORS request to node without CORS, expected failure corsheaders = {"Origin": origin, "Access-Control-Request-Method": "POST"} conn.request('OPTIONS', '/', None, corsheaders) out1 = conn.getresponse() assert_equal(out1.status, http.client.METHOD_NOT_ALLOWED) assert_equal(b'JSONRPC server handles only POST requests', out1.read()) if __name__ == '__main__': HTTPBasicsTest().main() diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index b6935dcd0..0d47e5892 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -1,333 +1,333 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the REST API.""" import binascii from decimal import Decimal from enum import Enum import http.client from io import BytesIO import json from struct import pack, unpack import urllib.parse from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_greater_than_or_equal, hex_str_to_bytes, ) class ReqType(Enum): JSON = 1 BIN = 2 HEX = 3 class RetType(Enum): OBJ = 1 BYTES = 2 JSON = 3 def filter_output_indices_by_value(vouts, value): for vout in vouts: if vout['value'] == value: yield vout['n'] class RESTTest (BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-rest"], []] def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON): rest_uri = '/rest' + uri if req_type == ReqType.JSON: rest_uri += '.json' elif req_type == ReqType.BIN: rest_uri += '.bin' elif req_type == ReqType.HEX: rest_uri += '.hex' conn = http.client.HTTPConnection(self.url.hostname, self.url.port) self.log.debug('{} {} {}'.format(http_method, rest_uri, body)) if http_method == 'GET': conn.request('GET', rest_uri) elif http_method == 'POST': conn.request('POST', rest_uri, body) resp = conn.getresponse() assert_equal(resp.status, status) if ret_type == RetType.OBJ: return resp elif ret_type == RetType.BYTES: return resp.read() elif ret_type == RetType.JSON: return json.loads(resp.read().decode('utf-8'), parse_float=Decimal) def run_test(self): self.url = urllib.parse.urlparse(self.nodes[0].url) self.log.info("Mine blocks and send Bitcoin Cash to node 1") # Random address so node1's balance doesn't increase not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ" self.nodes[0].generate(1) self.sync_all() self.nodes[1].generatetoaddress(100, not_related_address) self.sync_all() assert_equal(self.nodes[0].getbalance(), 50) txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() self.nodes[1].generatetoaddress(1, not_related_address) self.sync_all() bb_hash = self.nodes[0].getbestblockhash() assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) self.log.info("Load the transaction using the /tx URI") json_obj = self.test_rest_request("/tx/{}".format(txid)) # Get the vin to later check for utxo (should be spent by then) spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # Get n of 0.1 outpoint n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1')) spending = (txid, n) self.log.info("Query an unspent TXO using the /getutxos URI") json_obj = 
self.test_rest_request("/getutxos/{}-{}".format(*spending)) # Check chainTip response assert_equal(json_obj['chaintipHash'], bb_hash) # Make sure there is one utxo assert_equal(len(json_obj['utxos']), 1) assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1')) self.log.info("Query a spent TXO using the /getutxos URI") json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent)) # Check chainTip response assert_equal(json_obj['chaintipHash'], bb_hash) # Make sure there is no utxo in the response because this outpoint has # been spent assert_equal(len(json_obj['utxos']), 0) # Check bitmap assert_equal(json_obj['bitmap'], "0") self.log.info("Query two TXOs using the /getutxos URI") json_obj = self.test_rest_request( "/getutxos/{}-{}/{}-{}".format(*(spending + spent))) assert_equal(len(json_obj['utxos']), 1) assert_equal(json_obj['bitmap'], "10") self.log.info( "Query the TXOs using the /getutxos URI with a binary response") bin_request = b'\x01\x02' for txid, n in [spending, spent]: bin_request += hex_str_to_bytes(txid) bin_request += pack("i", n) bin_response = self.test_rest_request( "/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES) output = BytesIO(bin_response) chain_height, = unpack("i", output.read(4)) response_hash = output.read(32)[::-1].hex() # Check if getutxo's chaintip during calculation was fine assert_equal(bb_hash, response_hash) # Chain height must be 102 assert_equal(chain_height, 102) self.log.info("Test the /getutxos URI with and without /checkmempool") # Create a transaction, check that it's found with /checkmempool, but # not found without. Then confirm the transaction and check that it's # found with or without /checkmempool. # Do a tx and don't sync txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) json_obj = self.test_rest_request("/tx/{}".format(txid)) # Get the spent output to later check for utxo (should be spent by then) spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # Get n of 0.1 outpoint n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1')) spending = (txid, n) json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending)) assert_equal(len(json_obj['utxos']), 0) json_obj = self.test_rest_request( "/getutxos/checkmempool/{}-{}".format(*spending)) assert_equal(len(json_obj['utxos']), 1) json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent)) assert_equal(len(json_obj['utxos']), 1) json_obj = self.test_rest_request( "/getutxos/checkmempool/{}-{}".format(*spent)) assert_equal(len(json_obj['utxos']), 0) self.nodes[0].generate(1) self.sync_all() json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending)) assert_equal(len(json_obj['utxos']), 1) json_obj = self.test_rest_request( "/getutxos/checkmempool/{}-{}".format(*spending)) assert_equal(len(json_obj['utxos']), 1) # Do some invalid requests self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ) self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ) self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ) # Test limits long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)]) self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ) long_uri = '/'.join(['{}-{}'.format(txid, n_) 
for n_ in range(15)]) self.test_rest_request( "/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200) # Generate a block so it doesn't affect upcoming tests self.nodes[0].generate(1) self.sync_all() self.log.info("Test the /block and /headers URIs") bb_hash = self.nodes[0].getbestblockhash() # Check binary format response = self.test_rest_request( "/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ) assert_greater_than(int(response.getheader('content-length')), 80) response_bytes = response.read() # Compare with block header response_header = self.test_rest_request( "/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ) assert_equal(int(response_header.getheader('content-length')), 80) response_header_bytes = response_header.read() assert_equal(response_bytes[0:80], response_header_bytes) # Check block hex format response_hex = self.test_rest_request( "/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ) assert_greater_than(int(response_hex.getheader('content-length')), 160) response_hex_bytes = response_hex.read().strip(b'\n') assert_equal(binascii.hexlify(response_bytes), response_hex_bytes) # Compare with hex block header response_header_hex = self.test_rest_request( "/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ) assert_greater_than( int(response_header_hex.getheader('content-length')), 160) response_header_hex_bytes = response_header_hex.read(160) assert_equal(binascii.hexlify( response_bytes[:80]), response_header_hex_bytes) # Check json format block_json_obj = self.test_rest_request("/block/{}".format(bb_hash)) assert_equal(block_json_obj['hash'], bb_hash) # Compare with json block header json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash)) # Ensure that there is one header in the json response assert_equal(len(json_obj), 1) # Request/response hash should be the same assert_equal(json_obj[0]['hash'], bb_hash) # Compare with normal RPC block response rpc_block_json = self.nodes[0].getblock(bb_hash) for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']: assert_equal(json_obj[0][key], rpc_block_json[key]) # See if we can get 5 headers in one response self.nodes[1].generate(5) self.sync_all() json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash)) # Now we should have 5 header objects assert_equal(len(json_obj), 5) self.log.info("Test the /tx URI") tx_hash = block_json_obj['tx'][0]['txid'] json_obj = self.test_rest_request("/tx/{}".format(tx_hash)) assert_equal(json_obj['txid'], tx_hash) # Check hex format response hex_response = self.test_rest_request( "/tx/{}".format(tx_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ) assert_greater_than_or_equal( int(hex_response.getheader('content-length')), json_obj['size']*2) self.log.info("Test tx inclusion in the /mempool and /block URIs") # Make 3 tx and mine them on node 1 txs = [] txs.append(self.nodes[0].sendtoaddress(not_related_address, 11)) txs.append(self.nodes[0].sendtoaddress(not_related_address, 11)) txs.append(self.nodes[0].sendtoaddress(not_related_address, 11)) self.sync_all() # Check that there are exactly 3 transactions in the TX memory pool # before generating the block json_obj = self.test_rest_request("/mempool/info") assert_equal(json_obj['size'], 3) # The size of the memory pool should be greater than 3x ~100 bytes assert_greater_than(json_obj['bytes'], 300) # Check that our submitted transactions are
in the TX memory pool json_obj = self.test_rest_request("/mempool/contents") for i, tx in enumerate(txs): assert tx in json_obj assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2]) assert_equal(json_obj[tx]['depends'], txs[i - 1:i]) # Now mine the transactions newblockhash = self.nodes[1].generate(1) self.sync_all() # Check that the 3 txs show up in the new block json_obj = self.test_rest_request("/block/{}".format(newblockhash[0])) non_coinbase_txs = {tx['txid'] for tx in json_obj['tx'] if 'coinbase' not in tx['vin'][0]} assert_equal(non_coinbase_txs, set(txs)) # Check the same but without tx details json_obj = self.test_rest_request( "/block/notxdetails/{}".format(newblockhash[0])) for tx in txs: assert tx in json_obj['tx'] self.log.info("Test the /chaininfo URI") bb_hash = self.nodes[0].getbestblockhash() json_obj = self.test_rest_request("/chaininfo") assert_equal(json_obj['bestblockhash'], bb_hash) if __name__ == '__main__': RESTTest().main() diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index b9ade789d..7a04dbf1a 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -1,128 +1,128 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the ZMQ notification interface.""" import configparser import struct from io import BytesIO from test_framework.test_framework import BitcoinTestFramework, SkipTest from test_framework.messages import CTransaction from test_framework.util import ( assert_equal, hash256, ) class ZMQSubscriber: def __init__(self, socket, topic): self.sequence = 0 self.socket = socket self.topic = topic import zmq self.socket.setsockopt(zmq.SUBSCRIBE, self.topic) def receive(self): topic, body, seq = self.socket.recv_multipart() # Topic should match the subscriber topic. assert_equal(topic, self.topic) # Sequence should be incremental. assert_equal(struct.unpack('<I', seq)[-1], self.sequence) self.sequence += 1 return BytesIO(body) [... remainder of interface_zmq.py and start of test/functional/mempool_limit.py missing ...] # ... a fee rate >= than minrelaytxfee txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee}) txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex']) assert_raises_rpc_error(-26, "mempool min fee not met", self.nodes[0].sendrawtransaction, txFS['hex']) if __name__ == '__main__': MempoolLimitTest().main() diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index d617aad4e..01e168a4d 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -1,326 +1,326 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
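# (Aside: the MAX_ANCESTORS and MAX_DESCENDANTS values of 25 used in this test match bitcoind's default -limitancestorcount/-limitdescendantcount of 25, so node0 below runs with the stock package limits while node1 is restricted further via -limitancestorcount=5.)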
"""Test descendant package tracking code.""" from decimal import Decimal from test_framework.messages import COIN from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, satoshi_round, sync_blocks, sync_mempools, ) MAX_ANCESTORS = 25 MAX_DESCENDANTS = 25 class MempoolPackagesTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [["-maxorphantx=1000"], ["-maxorphantx=1000", "-limitancestorcount=5"]] # Build a transaction that spends parent_txid:vout # Return amount sent def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransactionwithwallet(rawtx) txid = node.sendrawtransaction(signedtx['hex']) fulltx = node.getrawtransaction(txid, 1) # make sure we didn't generate a change output assert len(fulltx['vout']) == num_outputs return (txid, send_value) def run_test(self): ''' Mine some blocks and have them mature. ''' self.nodes[0].generate(101) utxo = self.nodes[0].listunspent(10) txid = utxo[0]['txid'] vout = utxo[0]['vout'] value = utxo[0]['amount'] fee = Decimal("0.0001") # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] for i in range(MAX_ANCESTORS): (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, 0, value, fee, 1) value = sent_value chain.append(txid) # Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor # count and fees should look correct mempool = self.nodes[0].getrawmempool(True) assert_equal(len(mempool), MAX_ANCESTORS) descendant_count = 1 descendant_fees = 0 descendant_size = 0 ancestor_size = sum([mempool[tx]['size'] for tx in mempool]) ancestor_count = MAX_ANCESTORS ancestor_fees = sum([mempool[tx]['fee'] for tx in mempool]) descendants = [] ancestors = list(chain) for x in reversed(chain): # Check that getmempoolentry is consistent with getrawmempool entry = self.nodes[0].getmempoolentry(x) assert_equal(entry, mempool[x]) # Check that the descendant calculations are correct assert_equal(mempool[x]['descendantcount'], descendant_count) descendant_fees += mempool[x]['fee'] assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']) assert_equal(mempool[x]['fees']['base'], mempool[x]['fee']) assert_equal(mempool[x]['fees']['modified'], mempool[x]['modifiedfee']) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN) assert_equal(mempool[x]['fees']['descendant'], descendant_fees) descendant_size += mempool[x]['size'] assert_equal(mempool[x]['descendantsize'], descendant_size) descendant_count += 1 # Check that ancestor calculations are correct assert_equal(mempool[x]['ancestorcount'], ancestor_count) assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN) assert_equal(mempool[x]['ancestorsize'], ancestor_size) ancestor_size -= mempool[x]['size'] ancestor_fees -= mempool[x]['fee'] ancestor_count -= 1 # Check that parent/child list is correct assert_equal(mempool[x]['spentby'], descendants[-1:]) assert_equal(mempool[x]['depends'], ancestors[-2:-1]) # Check that getmempooldescendants is correct assert_equal(sorted(descendants), sorted( self.nodes[0].getmempooldescendants(x))) # Check getmempooldescendants verbose output is correct for descendant, dinfo in self.nodes[0].getmempooldescendants(x, 
True).items(): assert_equal(dinfo['depends'], [ chain[chain.index(descendant) - 1]]) if dinfo['descendantcount'] > 1: assert_equal(dinfo['spentby'], [ chain[chain.index(descendant) + 1]]) else: assert_equal(dinfo['spentby'], []) descendants.append(x) # Check that getmempoolancestors is correct ancestors.remove(x) assert_equal(sorted(ancestors), sorted( self.nodes[0].getmempoolancestors(x))) # Check that getmempoolancestors verbose output is correct for ancestor, ainfo in self.nodes[0].getmempoolancestors(x, True).items(): assert_equal(ainfo['spentby'], [ chain[chain.index(ancestor) + 1]]) if ainfo['ancestorcount'] > 1: assert_equal(ainfo['depends'], [ chain[chain.index(ancestor) - 1]]) else: assert_equal(ainfo['depends'], []) # Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True) assert_equal(len(v_ancestors), len(chain) - 1) for x in v_ancestors.keys(): assert_equal(mempool[x], v_ancestors[x]) assert chain[-1] not in v_ancestors.keys() v_descendants = self.nodes[0].getmempooldescendants(chain[0], True) assert_equal(len(v_descendants), len(chain) - 1) for x in v_descendants.keys(): assert_equal(mempool[x], v_descendants[x]) assert chain[0] not in v_descendants.keys() # Check that ancestor modified fees includes fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(chain[0], 0, 1000) mempool = self.nodes[0].getrawmempool(True) ancestor_fees = 0 for x in chain: ancestor_fees += mempool[x]['fee'] assert_equal(mempool[x]['fees']['ancestor'], ancestor_fees + Decimal('0.00001')) assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000) # Undo the prioritisetransaction for later tests self.nodes[0].prioritisetransaction(chain[0], 0, -1000) # Check that descendant modified fees includes fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(chain[-1], 0, 1000) mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): descendant_fees += mempool[x]['fee'] assert_equal(mempool[x]['fees']['descendant'], descendant_fees + Decimal('0.00001')) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000) # Adding one more transaction on to the chain should fail. assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1) # Check that prioritising a tx before it's added to the mempool works # First clear the mempool by mining a block. self.nodes[0].generate(1) sync_blocks(self.nodes) assert_equal(len(self.nodes[0].getrawmempool()), 0) # Prioritise a transaction that has been mined, then add it back to the # mempool by using invalidateblock. 
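# (Aside, an illustrative sketch of the unit conversion these assertions rely on: prioritisetransaction takes its fee delta in satoshis, while getrawmempool reports the 'fees' fields in BTC, so a 2000-satoshi delta surfaces as satoshi_round(0.00002) in the BTC-denominated fields; with the names already imported in this file, Decimal(2000) / COIN == satoshi_round(0.00002) holds.)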
self.nodes[0].prioritisetransaction(chain[-1], 0, 2000) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Keep node1's tip synced with node0 self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) # Now check that the transaction is in the mempool, with the right modified fee mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): descendant_fees += mempool[x]['fee'] if (x == chain[-1]): assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'] + satoshi_round(0.00002)) assert_equal(mempool[x]['fees']['modified'], mempool[x]['fee'] + satoshi_round(0.00002)) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000) assert_equal(mempool[x]['fees']['descendant'], descendant_fees + satoshi_round(0.00002)) # TODO: check that node1's mempool is as expected # TODO: test ancestor size limits # Now test descendant chain limits txid = utxo[1]['txid'] value = utxo[1]['amount'] vout = utxo[1]['vout'] transaction_package = [] tx_children = [] # First create one parent tx with 10 children (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, vout, value, fee, 10) parent_transaction = txid for i in range(10): transaction_package.append( {'txid': txid, 'vout': i, 'amount': sent_value}) # Sign and send up to MAX_DESCENDANTS transactions chained off the parent tx for i in range(MAX_DESCENDANTS - 1): utxo = transaction_package.pop(0) (txid, sent_value) = self.chain_transaction( self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) if utxo['txid'] == parent_transaction: tx_children.append(txid) for j in range(10): transaction_package.append( {'txid': txid, 'vout': j, 'amount': sent_value}) mempool = self.nodes[0].getrawmempool(True) assert_equal(mempool[parent_transaction] ['descendantcount'], MAX_DESCENDANTS) assert_equal(sorted(mempool[parent_transaction] ['spentby']), sorted(tx_children)) for child in tx_children: assert_equal(mempool[child]['depends'], [parent_transaction]) # Sending one more chained transaction will fail utxo = transaction_package.pop(0) assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) # TODO: check that node1's mempool is as expected # TODO: test descendant size limits # Test reorg handling # First, the basics: self.nodes[0].generate(1) sync_blocks(self.nodes) self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash()) self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash()) # Now test the case where node1 has a transaction T in its mempool that # depends on transactions A and B which are in a mined block, and the # block containing A and B is disconnected, AND B is not accepted back # into node1's mempool because its ancestor count is too high. # Create 8 transactions, like so: # Tx0 -> Tx1 (vout0) # \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7 # # Mine them in the next block, then generate a new tx8 that spends # Tx1 and Tx7, and add to node1's mempool, then disconnect the # last block.
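# (Aside: node1 was started with "-limitancestorcount=5" in set_test_params above, so once the block is disconnected the deep end of the Tx2..Tx7 chain exceeds node1's ancestor limit and is not re-accepted into its mempool; that is the situation this scenario is built to hit.)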
# Create tx0 with 2 outputs utxo = self.nodes[0].listunspent() txid = utxo[0]['txid'] value = utxo[0]['amount'] vout = utxo[0]['vout'] send_value = satoshi_round((value - fee) / 2) inputs = [{'txid': txid, 'vout': vout}] outputs = {} for i in range(2): outputs[self.nodes[0].getnewaddress()] = send_value rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) txid = self.nodes[0].sendrawtransaction(signedtx['hex']) tx0_id = txid value = send_value # Create tx1 tx1_id, _ = self.chain_transaction( self.nodes[0], tx0_id, 0, value, fee, 1) # Create tx2-7 vout = 1 txid = tx0_id for i in range(6): (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, vout, value, fee, 1) vout = 0 value = sent_value # Mine these in a block self.nodes[0].generate(1) self.sync_all() # Now generate tx8, with a big fee inputs = [{'txid': tx1_id, 'vout': 0}, {'txid': txid, 'vout': 0}] outputs = {self.nodes[0].getnewaddress(): send_value + value - 4 * fee} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) txid = self.nodes[0].sendrawtransaction(signedtx['hex']) sync_mempools(self.nodes) # Now try to disconnect the tip on each node... self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) sync_blocks(self.nodes) if __name__ == '__main__': MempoolPackagesTest().main() diff --git a/test/functional/mempool_resurrect.py b/test/functional/mempool_resurrect.py index c4ea82c6d..2368de639 100755 --- a/test/functional/mempool_resurrect.py +++ b/test/functional/mempool_resurrect.py @@ -1,75 +1,75 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test resurrection of mined transactions when the blockchain is re-organized.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, create_tx # Create one-input, one-output, no-fee transaction: class MempoolCoinbaseTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [["-checkmempool"]] def run_test(self): node0_address = self.nodes[0].getnewaddress() # Spend block 1/2/3's coinbase transactions # Mine a block. # Create three more transactions, spending the spends # Mine another block. # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again. 
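# (Aside, an illustrative sketch of the mechanic under test, using only RPCs that appear in this file: invalidateblock rolls the chain back and the disconnected block's non-coinbase transactions are resurrected into the mempool, e.g. tip = node.getbestblockhash(); txs = node.getblock(tip)['tx'][1:]; node.invalidateblock(tip); assert set(txs) <= set(node.getrawmempool()). The steps below verify this across two invalidated blocks.)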
b = [self.nodes[0].getblockhash(n) for n in range(1, 4)] coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] spends1_raw = [create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids] spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw] blocks = [] blocks.extend(self.nodes[0].generate(1)) spends2_raw = [create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id] spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw] blocks.extend(self.nodes[0].generate(1)) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert tx["confirmations"] > 0 # Use invalidateblock to re-org back; all transactions should # end up unconfirmed and back in the mempool for node in self.nodes: node.invalidateblock(blocks[0]) # mempool should now contain all the unconfirmed txns assert_equal( set(self.nodes[0].getrawmempool()), set(spends1_id + spends2_id)) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert tx["confirmations"] == 0 # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert tx["confirmations"] > 0 if __name__ == '__main__': MempoolCoinbaseTest().main() diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py index 444b187ce..fcac12e64 100755 --- a/test/functional/mining_getblocktemplate_longpoll.py +++ b/test/functional/mining_getblocktemplate_longpoll.py @@ -1,87 +1,87 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test longpolling with getblocktemplate.""" from decimal import Decimal import threading from test_framework.test_framework import BitcoinTestFramework from test_framework.util import get_rpc_proxy, random_transaction class LongpollThread(threading.Thread): def __init__(self, node): threading.Thread.__init__(self) # query current longpollid templat = node.getblocktemplate() self.longpollid = templat['longpollid'] # create a new connection to the node, we can't use the same # connection from two threads self.node = get_rpc_proxy( node.url, 1, timeout=600, coveragedir=node.coverage_dir) def run(self): self.node.getblocktemplate({'longpollid': self.longpollid}) class GetBlockTemplateLPTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): self.log.info( "Warning: this test will take about 70 seconds in the best case.
Be patient.") self.nodes[0].generate(10) templat = self.nodes[0].getblocktemplate() longpollid = templat['longpollid'] # longpollid should not change between successive invocations if # nothing else happens templat2 = self.nodes[0].getblocktemplate() assert templat2['longpollid'] == longpollid # Test 1: test that the longpolling wait if we do nothing thr = LongpollThread(self.nodes[0]) thr.start() # check that thread still lives # wait 5 seconds or until thread exits thr.join(5) assert thr.is_alive() # Test 2: test that longpoll will terminate if another node generates a block # generate a block on another node self.nodes[1].generate(1) # check that thread will exit now that new transaction entered mempool # wait 5 seconds or until thread exits thr.join(5) assert not thr.is_alive() # Test 3: test that longpoll will terminate if we generate a block # ourselves thr = LongpollThread(self.nodes[0]) thr.start() # generate a block on another node self.nodes[0].generate(1) # wait 5 seconds or until thread exits thr.join(5) assert not thr.is_alive() # Test 4: test that introducing a new transaction into the mempool will # terminate the longpoll thr = LongpollThread(self.nodes[0]) thr.start() # generate a random transaction and submit it min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"] # min_relay_fee is fee per 1000 bytes, which should be more than enough. (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20) # after one minute, every 10 seconds the mempool is probed, so in 80 # seconds it should have returned thr.join(60 + 20) assert not thr.is_alive() if __name__ == '__main__': GetBlockTemplateLPTest().main() diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py index 2bd9ea8bd..f499884e8 100755 --- a/test/functional/mining_prioritisetransaction.py +++ b/test/functional/mining_prioritisetransaction.py @@ -1,166 +1,166 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the prioritisetransaction mining RPC.""" import time from test_framework.blocktools import ( create_confirmed_utxos, send_big_transactions, ) # FIXME: review how this test needs to be adapted w.r.t _LEGACY_MAX_BLOCK_SIZE from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.messages import COIN from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class PrioritiseTransactionTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-printpriority=1"], ["-printpriority=1"]] def run_test(self): self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] utxo_count = 90 utxos = create_confirmed_utxos(self.nodes[0], utxo_count) txids = [] # Create 3 batches of transactions at 3 different fee rate levels range_size = utxo_count // 3 for i in range(3): txids.append([]) start_range = i * range_size end_range = start_range + range_size txids[i] = send_big_transactions(self.nodes[0], utxos[start_range:end_range], end_range - start_range, 10 * (i + 1)) # Make sure that the size of each group of transactions exceeds # LEGACY_MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create # more transactions. 
mempool = self.nodes[0].getrawmempool(True) sizes = [0, 0, 0] for i in range(3): for j in txids[i]: assert j in mempool sizes[i] += mempool[j]['size'] # Fail => raise utxo_count assert sizes[i] > LEGACY_MAX_BLOCK_SIZE # add a fee delta to something in the cheapest bucket and make sure it gets mined # also check that a different entry in the cheapest bucket is NOT mined (lower # the priority to ensure it's not mined due to priority) self.nodes[0].prioritisetransaction( txids[0][0], 0, 100 * self.nodes[0].calculate_fee_from_txid(txids[0][0])) self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0) self.nodes[0].generate(1) mempool = self.nodes[0].getrawmempool() self.log.info("Assert that prioritised transaction was mined") assert txids[0][0] not in mempool assert txids[0][1] in mempool confirmed_transactions = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['tx'] # Pull the highest fee-rate transaction from a block high_fee_tx = confirmed_transactions[1] # Something high-fee should have been mined! assert high_fee_tx is not None # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). # # NOTE WELL: gettransaction returns the fee as a negative number and # as fractional coins. However, prioritisetransaction expects a # number of satoshis to add or subtract from the actual fee. # Thus the conversion here is simply int(tx_fee*COIN) to remove all fees, and then # we add the minimum fee back. tx_fee = self.nodes[0].gettransaction(high_fee_tx)['fee'] self.nodes[0].prioritisetransaction( high_fee_tx, -1e15, int(tx_fee*COIN) + self.nodes[0].calculate_fee_from_txid(high_fee_tx)) # Add everything back to mempool self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Check to make sure our high fee rate tx is back in the mempool mempool = self.nodes[0].getrawmempool() assert high_fee_tx in mempool # Now verify the modified-high feerate transaction isn't mined before # the other high fee transactions. Keep mining until our mempool has # decreased by all the high fee size that we calculated above. while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): self.nodes[0].generate(1) # High fee transaction should not have been mined, but other high fee rate # transactions should have been. mempool = self.nodes[0].getrawmempool() self.log.info( "Assert that de-prioritised transaction is still in mempool") assert high_fee_tx in mempool for x in txids[2]: if (x != high_fee_tx): assert x not in mempool # Create a free, low priority transaction. Should be rejected.
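# (Aside, illustrative: a zero-fee transaction fails mempool acceptance with RPC error -26, and granting it a satoshi delta equal to the minimum fee for a 1000-byte transaction, e.g. node.prioritisetransaction(txid, 0, int(relayfee * COIN)), is enough to flip the outcome; that is exactly what the steps below exercise.)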
utxo_list = self.nodes[0].listunspent() assert len(utxo_list) > 0 utxo = utxo_list[0] inputs = [] outputs = {} inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"] txid = self.nodes[0].sendrawtransaction(tx_hex) # A tx that spends an in-mempool tx has 0 priority, so we can use it to # test the effect of using prioritise transaction for mempool # acceptance inputs = [] inputs.append({"txid": txid, "vout": 0}) outputs = {} outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs) tx2_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx2)["hex"] tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"] # This will raise an exception due to min relay fee not being met assert_raises_rpc_error(-26, "insufficient priority (code 66)", self.nodes[0].sendrawtransaction, tx2_hex) assert tx2_id not in self.nodes[0].getrawmempool() # This is a less than 1000-byte transaction, so just set the fee # to be the minimum for a 1000-byte transaction and check that it is # accepted. self.nodes[0].prioritisetransaction( tx2_id, 0, int(self.relayfee * COIN)) self.log.info( "Assert that prioritised free transaction is accepted to mempool") assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id) assert tx2_id in self.nodes[0].getrawmempool() # Test that calling prioritisetransaction is sufficient to trigger # getblocktemplate to (eventually) return a new block. mock_time = int(time.time()) self.nodes[0].setmocktime(mock_time) template = self.nodes[0].getblocktemplate() self.nodes[0].prioritisetransaction( tx2_id, 0, -int(self.relayfee * COIN)) self.nodes[0].setmocktime(mock_time + 10) new_template = self.nodes[0].getblocktemplate() assert template != new_template if __name__ == '__main__': PrioritiseTransactionTest().main() diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index 3c2d74f48..56041576f 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -1,927 +1,927 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test compact blocks (BIP 152). Only testing Version 1 compact blocks (txids) """ import random from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, HeaderAndShortIDs, msg_block, msg_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ToHex, ) from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.script import CScript, OP_TRUE from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal, sync_blocks, wait_until # TestP2PConn: A peer we use to send messages to bitcoind, and store responses. 
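# (Aside: the inv/getdata 'type' codes used throughout this test are 1 = transaction, 2 = block and 4 = compact block per BIP 152, which is why on_inv below treats type 2 as a block announcement and why CInv(4, ...) is used to request compact blocks.)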
class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.last_sendcmpct = [] self.block_announced = False # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. self.announced_blockhashes = set() def on_sendcmpct(self, message): self.last_sendcmpct.append(message) def on_cmpctblock(self, message): self.block_announced = True self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() self.announced_blockhashes.add( self.last_message["cmpctblock"].header_and_shortids.header.sha256) def on_headers(self, message): self.block_announced = True for x in self.last_message["headers"].headers: x.calc_sha256() self.announced_blockhashes.add(x.sha256) def on_inv(self, message): for x in self.last_message["inv"].inv: if x.type == 2: self.block_announced = True self.announced_blockhashes.add(x.hash) # Requires caller to hold mininode_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): return (block_hash in self.announced_blockhashes) wait_until(received_hash, timeout=timeout, lock=mininode_lock) def send_await_disconnect(self, message, timeout=30): """Send a message to the node and wait for disconnect. This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock) class CompactBlocksTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ["-txindex"]] self.utxos = [] def build_block_on_tip(self, node): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] block = create_block( int(tip, 16), create_coinbase(height + 1), mtp + 1) block.nVersion = 4 block.solve() return block # Create 10 more anyone-can-spend utxos for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256 self.nodes[0].generate(100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) for i in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): return (len(test_node.last_sendcmpct) > 0) wait_until(received_sendcmpct, timeout=30, lock=mininode_lock) with mininode_lock: # Check that the first version received is the preferred one assert_equal( test_node.last_sendcmpct[0].version, preferred_version) # And that we receive versions down to 1. assert_equal(test_node.last_sendcmpct[-1].version, 1) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert peer.block_announced with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) # We shouldn't get any block announcements via cmpctblock yet. check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Try one more time, this time after requesting headers. test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() sendcmpct.version = 999 # was: preferred_version+1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. 
test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True sendcmpct.version = preferred_version sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after sending a version-1, announce=false message. sendcmpct.version = preferred_version - 1 sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. sendcmpct.version = 1 # preferred_version-1 sendcmpct.announce = True old_node.send_and_ping(sendcmpct) # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, old_node, lambda p: "cmpctblock" in p.last_message) # This test actually causes bitcoind to (reasonably!) disconnect us, so do # this last. def test_invalid_cmpctblock_message(self): self.nodes[0].generate(101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) assert_equal( int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # bitcoind's choice of nonce. def test_compactblock_construction(self, node, test_node): # Generate a bunch of transactions. node.generate(101) num_transactions = 25 address = node.getnewaddress() for i in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) test_node.wait_for_block_announcement(tip) # Make sure we will receive a fast-announce compact block self.request_cb_announcements(test_node, node) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. 
block = FromHex(CBlock(), node.getblock( "{:064x}".format(block_hash), False)) for tx in block.vtx: tx.calc_sha256() block.rehash() # Wait until the block was announced (via compact blocks) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) # Now fetch the compact block using a normal non-announce getdata with mininode_lock: test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" test_node.send_message(msg_getdata([inv])) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block): # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert len(header_and_shortids.prefilled_txn) >= 1 assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks that the transactions agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. [k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): if (len(header_and_shortids.prefilled_txn) > 0 and header_and_shortids.prefilled_txn[0].index == index): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that bitcoind requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed.
def test_compactblock_requests(self, node, test_node, version): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node) with mininode_lock: test_node.last_message.pop("getdata", None) if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert_equal(test_node.last_message["getdata"].inv[0].type, 4) assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 comp_block.shortids = [ calculate_shortid(k0, k1, coinbase_hash)] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: assert "getblocktxn" in test_node.last_message absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. # Note that num_transactions is number of transactions not including the # coinbase. def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) pad_tx(tx) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) ordered_txs = block.vtx block.vtx = [block.vtx[0]] + \ sorted(block.vtx[1:], key=lambda tx: tx.get_id()) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block, ordered_txs # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. def test_getblocktxn_requests(self, node, test_node, version): def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert "getblocktxn" in peer.last_message absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. 
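# (Aside, an illustrative sketch assuming only the BlockTransactionsRequest helper imported at the top of this file: the indexes inside a getblocktxn request travel differentially encoded per BIP 152, and from_absolute()/to_absolute() convert between the two forms, e.g. req = BlockTransactionsRequest(block.sha256, []); req.from_absolute([1, 3, 4]) stores the wire form [1, 1, 0] in req.indexes, and req.to_absolute() returns [1, 3, 4] again.)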
utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions comp_block.initialize_from_block( block, prefill_list=[0, 1, 5]) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(ordered_txs[1])) assert ordered_txs[1].hash in node.getrawmempool() test_node.send_and_ping(msg_tx(ordered_txs[1])) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. prefill_list = [0, 1, 2, 3, 4, 5] prefill_list.remove(block.vtx.index(ordered_txs[1])) expected_index = block.vtx.index(ordered_txs[-1]) prefill_list.remove(expected_index) comp_block.initialize_from_block(block, prefill_list=prefill_list) test_getblocktxn_response(comp_block, test_node, [expected_index]) msg_bt.block_transactions = BlockTransactions( block.sha256, [ordered_txs[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) for tx in block.vtx[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert tx.hash in mempool # Clear out last request. with mininode_lock: test_node.last_message.pop("getblocktxn", None) # Send compact block comp_block.initialize_from_block(block, prefill_list=[0]) test_tip_after_message( node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert "getblocktxn" not in test_node.last_message # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in ordered_txs[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. 
mempool = node.getrawmempool() for tx in ordered_txs[1:6]: assert tx.hash in mempool # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0]) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indices = [] with mininode_lock: assert "getblocktxn" in test_node.last_message absolute_indices = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) expected_indices = [] for i in [6, 7, 8, 9, 10]: expected_indices.append(block.vtx.index(ordered_txs[i])) assert_equal(absolute_indices, sorted(expected_indices)) # Now give an incorrect response. # Note that it's possible for bitcoind to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change was made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() msg.block_transactions = BlockTransactions( block.sha256, [ordered_txs[5]] + ordered_txs[7:]) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert test_node.last_message["getdata"].inv[0].type == 2 assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Deliver the block test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # bitcoind will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute( sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int( block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop( 0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. 
block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), [0]) with mininode_lock: test_node.last_message.pop("block", None) test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(block_hash, 16)) assert "blocktxn" not in test_node.last_message def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() node.generate(1) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() with mininode_lock: test_node.last_message.pop("block", None) test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height - 5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert found # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: assert "blocktxn" not in test_node.last_message def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 10) [l.clear_block_announcement() for l in listeners] node.submitblock(ToHex(block)) for l in listeners: wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) with mininode_lock: for l in listeners: assert "cmpctblock" in l.last_message l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256( ) assert_equal( l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. def test_invalid_tx_in_compactblock(self, node, test_node): assert len(self.utxos) utxo = self.utxos[0] block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) block.vtx.remove(ordered_txs[3]) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. 
comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4]) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert int(node.getbestblockhash(), 16) != block.sha256 test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version=1): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) msg = msg_sendcmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): assert len(self.utxos) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert "getblocktxn" in peer.last_message return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert tx.hash in mempool delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() # TODO: modify txhash in a way that doesn't impact txid. delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) # Because txhash isn't modified, we end up reconstructing the same block # assert int(node.getbestblockhash(), 16) != block.sha256 msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): # Setup the p2p connections self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn()) self.ex_softfork_node = self.nodes[1].add_p2p_connection( TestP2PConn(), services=NODE_NETWORK) self.old_node = self.nodes[1].add_p2p_connection( TestP2PConn(), services=NODE_NETWORK) # We will need UTXOs to construct transactions in later tests. self.make_utxos() self.log.info("Running tests:") self.log.info("\tTesting SENDCMPCT p2p message... ") self.test_sendcmpct(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_sendcmpct( self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblock_construction( self.nodes[1], self.ex_softfork_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock requests...
") self.test_compactblock_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_compactblock_requests( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_requests(self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_handler(self.nodes[1], self.ex_softfork_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) sync_blocks(self.nodes) self.log.info( "\tTesting compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblocks_not_at_tip( self.nodes[1], self.ex_softfork_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_incorrect_blocktxn_response( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) # End-to-end block relay tests self.log.info("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0]) self.request_cb_announcements(self.old_node, self.nodes[1]) self.request_cb_announcements( self.ex_softfork_node, self.nodes[1], version=2) self.test_end_to_end_block_relay( self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay( self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node]) self.log.info("\tTesting handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) self.test_invalid_tx_in_compactblock( self.nodes[1], self.ex_softfork_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) self.log.info( "\tTesting reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers( self.nodes[1], self.ex_softfork_node, self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() if __name__ == '__main__': CompactBlocksTest().main() diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index 055311969..6c1b51834 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -1,105 +1,105 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test processing of feefilter messages.""" from decimal import Decimal import time from test_framework.messages import msg_feefilter from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import sync_blocks, sync_mempools def hashToHex(hash): return format(hash, '064x') # Wait up to 60 secs to see if the testnode has received all the expected invs def allInvsMatch(invsExpected, testnode): for x in range(60): with mininode_lock: if (sorted(invsExpected) == sorted(testnode.txinvs)): return True time.sleep(1) return False class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.txinvs = [] def on_inv(self, message): for i in message.inv: if (i.type == 1): self.txinvs.append(hashToHex(i.hash)) def clear_invs(self): with mininode_lock: self.txinvs = [] class FeeFilterTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): node1 = self.nodes[1] node0 = self.nodes[0] # Get out of IBD node1.generate(1) sync_blocks(self.nodes) self.nodes[0].add_p2p_connection(TestP2PConn()) # Test that invs are received for all txs at feerate of 20 sat/byte node1.settxfee(Decimal("0.00020000")) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() # Set a filter of 15 sat/byte self.nodes[0].p2p.send_and_ping(msg_feefilter(15000)) # Test that txs are still being received (paying 20 sat/byte) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() # Change tx fee rate to 10 sat/byte and test they are no longer # received node1.settxfee(Decimal("0.00010000")) [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] sync_mempools(self.nodes) # must be sure node 0 has received all txs # Send one transaction from node0 that should be received, so that we # we can sync the test on receipt (if node1's txs were relayed, they'd # be received by the time this node0 tx is received). This is # unfortunately reliant on the current relay behavior where we batch up # to 35 entries in an inv, which means that when this next transaction # is eligible for relay, the prior transactions from node1 are eligible # as well. node0.settxfee(Decimal("0.00020000")) txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() # Remove fee filter and check that txs are received again self.nodes[0].p2p.send_and_ping(msg_feefilter(0)) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() if __name__ == '__main__': FeeFilterTest().main() diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index eecaa2876..43fa750cb 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -1,171 +1,171 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test message sending before handshake completion. A node should never send anything other than VERSION/VERACK/REJECT until it's received a VERACK. 
This test connects to a node and sends it a few messages, trying to entice it into sending us something it shouldn't. """ import time from test_framework.messages import ( msg_getaddr, msg_ping, msg_verack, ) from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import wait_until banscore = 10 class CLazyNode(P2PInterface): def __init__(self): super().__init__() self.unexpected_msg = False self.ever_connected = False def bad_message(self, message): self.unexpected_msg = True self.log.info( "should not have received message: {}".format(message.command)) def on_open(self): self.ever_connected = True def on_version(self, message): self.bad_message(message) def on_verack(self, message): self.bad_message(message) def on_reject(self, message): self.bad_message(message) def on_inv(self, message): self.bad_message(message) def on_addr(self, message): self.bad_message(message) def on_getdata(self, message): self.bad_message(message) def on_getblocks(self, message): self.bad_message(message) def on_tx(self, message): self.bad_message(message) def on_block(self, message): self.bad_message(message) def on_getaddr(self, message): self.bad_message(message) def on_headers(self, message): self.bad_message(message) def on_getheaders(self, message): self.bad_message(message) def on_ping(self, message): self.bad_message(message) def on_mempool(self, message): self.bad_message(message) def on_pong(self, message): self.bad_message(message) def on_feefilter(self, message): self.bad_message(message) def on_sendheaders(self, message): self.bad_message(message) def on_sendcmpct(self, message): self.bad_message(message) def on_cmpctblock(self, message): self.bad_message(message) def on_getblocktxn(self, message): self.bad_message(message) def on_blocktxn(self, message): self.bad_message(message) # Node that never sends a version. We'll use this to send a bunch of messages # anyway, and eventually get disconnected. class CNodeNoVersionBan(CLazyNode): # send a bunch of veracks without sending a message. This should get us disconnected. # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes def on_open(self): super().on_open() for i in range(banscore): self.send_message(msg_verack()) def on_reject(self, message): pass # Node that never sends a version. This one just sits idle and hopes to receive # any message (it shouldn't!) class CNodeNoVersionIdle(CLazyNode): def __init__(self): super().__init__() # Node that sends a version but not a verack. class CNodeNoVerackIdle(CLazyNode): def __init__(self): self.version_received = False super().__init__() def on_reject(self, message): pass def on_verack(self, message): pass # When version is received, don't reply with a verack. Instead, see if the # node will give us a message that it shouldn't. This is not an exhaustive # list! 
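# Aside: the long list of on_*() overrides in CLazyNode above could equally
# be generated, since every handler just forwards to bad_message(). A sketch
# of that pattern (illustrative only; this test keeps the explicit methods):
def install_bad_message_handlers(cls, commands):
    for command in commands:
        # Bind each on_<command>() to a handler that records the message.
        setattr(cls, "on_" + command,
                lambda self, message: self.bad_message(message))
    return cls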
def on_version(self, message): self.version_received = True self.send_message(msg_ping()) self.send_message(msg_getaddr()) class P2PLeakTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [['-banscore=' + str(banscore)]] def run_test(self): no_version_bannode = self.nodes[0].add_p2p_connection( CNodeNoVersionBan(), send_version=False, wait_for_verack=False) no_version_idlenode = self.nodes[0].add_p2p_connection( CNodeNoVersionIdle(), send_version=False, wait_for_verack=False) no_verack_idlenode = self.nodes[0].add_p2p_connection( CNodeNoVerackIdle()) wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock) # Mine a block and make sure that it's not sent to the connected nodes self.nodes[0].generate(1) # Give the node enough time to possibly leak out a message time.sleep(5) # This node should have been banned assert not no_version_bannode.is_connected self.nodes[0].disconnect_p2ps() # Wait until all connections are closed wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0) # Make sure no unexpected messages came in assert not no_version_bannode.unexpected_msg assert not no_version_idlenode.unexpected_msg assert not no_verack_idlenode.unexpected_msg if __name__ == '__main__': P2PLeakTest().main() diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py index 2e929abc5..fcdf8a573 100755 --- a/test/functional/p2p_unrequested_blocks.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -1,351 +1,351 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of unrequested blocks. Setup: two nodes, node0+node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. We have one P2PInterface connection to node0 called test_node, and one to node1 called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance for node0, but node1 should skip processing due to nMinimumChainWork. Node1 is unused in tests 3-7: 3. Mine a block that forks from the genesis block, and deliver to test_node. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more or equal work to the tip. 4a,b. Send another two blocks that build on the forking block. Node0 should process the second block but be stuck on the shorter chain, because it's missing an intermediate block. 4c. Send 288 more blocks on the longer chain (the number of blocks ahead we currently store). Node0 should process all but the last block (too far ahead in height). 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that it is missing the height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. 8. 
Create a fork which is invalid at a height longer than the current chain (i.e. to which the node will try to reorg) but which has headers built on top of the invalid block. Check that we get disconnected if we send more headers on the chain the node now knows to be invalid. 9. Test that Node1 is able to sync when connected to node0 (which should have sufficient work on its chain). """ import time from test_framework.blocktools import ( create_block, create_coinbase, create_transaction, ) from test_framework.messages import ( CBlockHeader, CInv, msg_block, msg_headers, msg_inv, ) from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, sync_blocks, ) class AcceptBlockTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-noparkdeepreorg"], ["-minimumchainwork=0x10"]] def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from its peers, while Node1 (started with -minimumchainwork) will be # used to test the interaction with nMinimumChainWork. self.setup_nodes() def run_test(self): # Setup the p2p connections # test_node connects to node0 test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # min_work_node connects to node1 (which has -minimumchainwork set) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) # 1. Have nodes mine a block (leave IBD) [n.generate(1) for n in self.nodes] tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes] # 2. Send one block that builds on each tip. # This should be accepted by node0 blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append(create_block( tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) min_work_node.send_message(msg_block(blocks_h2[1])) for x in [test_node, min_work_node]: x.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 1) self.log.info( "First height 2 block accepted by node0; correctly rejected by node1") # 3. Send another block that builds on genesis. block_h1f = create_block( int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time) block_time += 1 block_h1f.solve() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h1f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash) # 4. Send another two blocks that build on the fork. block_h2f = create_block( block_h1f.sha256, create_coinbase(2), block_time) block_time += 1 block_h2f.solve() test_node.send_message(msg_block(block_h2f)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h2f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found # But this block should be accepted by node since it has equal work. 
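# Aside: the "scan getchaintips() for a hash and check its status" pattern
# recurs throughout this test. A sketch of a helper that would factor it out,
# using the assert_equal already imported above (illustrative only; the test
# keeps its explicit loops):
def assert_tip_status(node, block_hash, expected_status):
    for tip in node.getchaintips():
        if tip['hash'] == block_hash:
            assert_equal(tip['status'], expected_status)
            return
    raise AssertionError("{} not found in getchaintips".format(block_hash))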
self.nodes[0].getblock(block_h2f.hash) self.log.info("Second height 2 block accepted, but not reorg'ed to") # 4b. Now send another block that builds on the forking chain. block_h3 = create_block( block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1) block_h3.solve() test_node.send_message(msg_block(block_h3)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h3.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found # But this block should be accepted by node since it has more work. self.nodes[0].getblock(block_h3.hash) self.log.info("Unrequested more-work block accepted") # 4c. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node (as long as it is not missing any headers) tip = block_h3 all_blocks = [] for i in range(288): next_block = create_block( tip.sha256, create_coinbase(i + 4), tip.nTime+1) next_block.solve() all_blocks.append(next_block) tip = next_block # Now send the block at height 5 and check that it wasn't accepted (missing header) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash) assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash) # The block at height 5 should be accepted if we provide the missing header, though headers_message = msg_headers() headers_message.headers.append(CBlockHeader(all_blocks[0])) test_node.send_message(headers_message) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() self.nodes[0].getblock(all_blocks[1].hash) # Now send the blocks in all_blocks for i in range(288): test_node.send_message(msg_block(all_blocks[i])) test_node.sync_with_ping() # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_rpc_error( -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) # 5. Test handling of an unrequested block on the node that didn't process it. # Should still not be processed (even though it has a child that has more # work). # The node should have requested the blocks at some point, so # disconnect/reconnect first self.nodes[0].disconnect_p2ps() self.nodes[1].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) self.log.info( "Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) test_node.send_message(msg_inv([CInv(2, block_h3.sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, block_h1f.sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. 
Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) self.nodes[0].getblock(all_blocks[286].hash) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) self.log.info( "Successfully reorged to longer chain from non-whitelisted peer") # 8. Create a chain which is invalid at a height longer than the # current chain, but which has more blocks on top of that block_289f = create_block( all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1) block_289f.solve() block_290f = create_block( block_289f.sha256, create_coinbase(290), block_289f.nTime+1) block_290f.solve() block_291 = create_block( block_290f.sha256, create_coinbase(291), block_290f.nTime+1) # block_291 spends a coinbase below maturity! block_291.vtx.append(create_transaction( block_290f.vtx[0], 0, b"42", 1)) block_291.hashMerkleRoot = block_291.calc_merkle_root() block_291.solve() block_292 = create_block( block_291.sha256, create_coinbase(292), block_291.nTime+1) block_292.solve() # Now send all the headers on the chain and enough blocks to trigger reorg headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_289f)) headers_message.headers.append(CBlockHeader(block_290f)) headers_message.headers.append(CBlockHeader(block_291)) headers_message.headers.append(CBlockHeader(block_292)) test_node.send_message(headers_message) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_292.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) test_node.send_message(msg_block(block_289f)) test_node.send_message(msg_block(block_290f)) test_node.sync_with_ping() self.nodes[0].getblock(block_289f.hash) self.nodes[0].getblock(block_290f.hash) test_node.send_message(msg_block(block_291)) # At this point we've sent an obviously-bogus block, wait for full processing # without assuming whether we will be disconnected or not try: # Only wait a short while so the test doesn't take forever if we do get # disconnected test_node.sync_with_ping(timeout=1) except AssertionError: test_node.wait_for_disconnect() self.nodes[0].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # We should have failed reorg and switched back to 290 (but have block 291) assert_equal(self.nodes[0].getblockcount(), 290) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_equal(self.nodes[0].getblock( block_291.hash)["confirmations"], -1) # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected block_293 = create_block( block_292.sha256, create_coinbase(293), block_292.nTime+1) block_293.solve() headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_293)) test_node.send_message(headers_message) test_node.wait_for_disconnect() # 9. 
Connect node1 to node0 and ensure it is able to sync connect_nodes(self.nodes[0], self.nodes[1]) sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Successfully synced nodes 1 and 0") if __name__ == '__main__': AcceptBlockTest().main() diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py index 0bde659b1..bb497d22a 100755 --- a/test/functional/rpc_bind.py +++ b/test/functional/rpc_bind.py @@ -1,141 +1,141 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test running bitcoind with the -rpcbind and -rpcallowip options.""" from platform import uname import socket import sys from test_framework.netutil import addr_to_hex, all_interfaces, get_bind_addrs from test_framework.test_framework import BitcoinTestFramework, SkipTest from test_framework.util import ( assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url, ) class RPCBindTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.bind_to_localhost_only = False self.num_nodes = 1 def setup_network(self): self.add_nodes(self.num_nodes, None) def run_bind_test(self, allow_ips, connect_to, addresses, expected): ''' Start a node with requested rpcallowip and rpcbind parameters, then try to connect, and check if the set of bound addresses matches the expected set. ''' self.log.info("Bind test for {}".format(str(addresses))) expected = [(addr_to_hex(addr), port) for (addr, port) in expected] base_args = ['-disablewallet', '-nolisten'] if allow_ips: base_args += ['-rpcallowip=' + x for x in allow_ips] binds = ['-rpcbind=' + addr for addr in addresses] parts = connect_to.split(':') if len(parts) == 2: self.nodes[0].host = parts[0] self.nodes[0].rpc_port = parts[1] else: self.nodes[0].host = connect_to self.nodes[0].rpc_port = rpc_port(self.nodes[0].index) self.start_node(0, base_args + binds) pid = self.nodes[0].process.pid assert_equal(set(get_bind_addrs(pid)), set(expected)) self.stop_nodes() def run_allowip_test(self, allow_ips, rpchost, rpcport): ''' Start a node with rpcallow IP, and request getnetworkinfo at a non-localhost IP. ''' self.log.info("Allow IP test for {}:{}".format(rpchost, rpcport)) base_args = ['-disablewallet', '-nolisten'] + \ ['-rpcallowip=' + x for x in allow_ips] self.nodes[0].host = None self.start_nodes([base_args]) # connect to node through non-loopback interface url = rpc_url(self.nodes[0].datadir, rpchost, rpcport) node = get_rpc_proxy(url, 0, coveragedir=self.options.coveragedir) node.getnetworkinfo() self.stop_nodes() def run_test(self): # due to OS-specific network stats queries, this test works only on Linux if not sys.platform.startswith('linux'): raise SkipTest("This test can only be run on Linux.") # WSL in currently not supported (refer to # https://reviews.bitcoinabc.org/T400 for details). # This condition should be removed once netstat support is provided by # Microsoft. 
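# Aside: run_bind_test() above splits connect_to on ':' and only treats a
# two-part result as host:port, which is why the bare IPv6 literal '[::1]'
# used below (it splits into three parts) falls through to the default-port
# branch. A sketch of a more explicit parser (illustrative only):
def split_host_port(connect_to, default_port):
    if connect_to.startswith('['):
        # Bracketed IPv6 literal, optionally followed by ':port'.
        host, sep, port = connect_to.rpartition(']:')
        if sep:
            return host + ']', int(port)
        return connect_to, default_port
    host, sep, port = connect_to.rpartition(':')
    if sep:
        return host, int(port)
    return connect_to, default_port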
if "microsoft" in uname().version.lower(): raise SkipTest( "Running this test on WSL is currently not supported") # find the first non-loopback interface for testing non_loopback_ip = None for name, ip in all_interfaces(): if ip != '127.0.0.1': non_loopback_ip = ip break if non_loopback_ip is None: raise SkipTest( "This test requires at least one non-loopback IPv4 interface.") try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) s.connect(("::1", 1)) s.close except OSError: raise SkipTest("This test requires IPv6 support.") self.log.info("Using interface {} for testing".format(non_loopback_ip)) defaultport = rpc_port(0) # check default without rpcallowip (IPv4 and IPv6 localhost) self.run_bind_test(None, '127.0.0.1', [], [('127.0.0.1', defaultport), ('::1', defaultport)]) # check default with rpcallowip (IPv6 any) self.run_bind_test(['127.0.0.1'], '127.0.0.1', [], [('::0', defaultport)]) # check only IPv4 localhost (explicit) self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'], [('127.0.0.1', defaultport)]) # check only IPv4 localhost (explicit) with alternative port self.run_bind_test( ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'], [('127.0.0.1', 32171)]) # check only IPv4 localhost (explicit) with multiple alternative ports # on same host self.run_bind_test( ['127.0.0.1'], '127.0.0.1:32171', [ '127.0.0.1:32171', '127.0.0.1:32172'], [('127.0.0.1', 32171), ('127.0.0.1', 32172)]) # check only IPv6 localhost (explicit) self.run_bind_test(['[::1]'], '[::1]', ['[::1]'], [('::1', defaultport)]) # check both IPv4 and IPv6 localhost (explicit) self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'], [('127.0.0.1', defaultport), ('::1', defaultport)]) # check only non-loopback interface self.run_bind_test( [non_loopback_ip], non_loopback_ip, [non_loopback_ip], [(non_loopback_ip, defaultport)]) # Check that with invalid rpcallowip, we are denied self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport) assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport) if __name__ == '__main__': RPCBindTest().main() diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 42dc2aac3..f0f32db26 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -1,348 +1,348 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test RPCs related to blockchainstate. Test the following RPCs: - getblockchaininfo - gettxoutsetinfo - getdifficulty - getbestblockhash - getblockhash - getblockheader - getchaintxstats - getnetworkhashps - verifychain Tests correspond to code in rpc/blockchain.cpp. 
""" from decimal import Decimal import http.client import subprocess from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_greater_than_or_equal, assert_raises, assert_raises_rpc_error, assert_is_hash_string, assert_is_hex_string, ) from test_framework.blocktools import ( create_block, create_coinbase, ) from test_framework.messages import ( msg_block, ) from test_framework.mininode import ( P2PInterface, ) class BlockchainTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [['-stopatheight=207', '-prune=1']] def run_test(self): self._test_getblockchaininfo() self._test_getchaintxstats() self._test_gettxoutsetinfo() self._test_getblockheader() self._test_getdifficulty() self._test_getnetworkhashps() self._test_stopatheight() self._test_getblock() self._test_waitforblockheight() assert self.nodes[0].verifychain(4, 0) def _test_getblockchaininfo(self): self.log.info("Test getblockchaininfo") keys = [ 'bestblockhash', 'blocks', 'chain', 'chainwork', 'difficulty', 'headers', 'initialblockdownload', 'mediantime', 'pruned', 'size_on_disk', 'verificationprogress', 'warnings', ] res = self.nodes[0].getblockchaininfo() # result should have these additional pruning keys if manual pruning is # enabled assert_equal(sorted(res.keys()), sorted( ['pruneheight', 'automatic_pruning'] + keys)) # size_on_disk should be > 0 assert_greater_than(res['size_on_disk'], 0) # pruneheight should be greater or equal to 0 assert_greater_than_or_equal(res['pruneheight'], 0) # check other pruning fields given that prune=1 assert res['pruned'] assert not res['automatic_pruning'] self.restart_node(0, ['-stopatheight=207']) res = self.nodes[0].getblockchaininfo() # should have exact keys assert_equal(sorted(res.keys()), keys) self.restart_node(0, ['-stopatheight=207', '-prune=550']) res = self.nodes[0].getblockchaininfo() # result should have these additional pruning keys if prune=550 assert_equal(sorted(res.keys()), sorted( ['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys)) # check related fields assert res['pruned'] assert_equal(res['pruneheight'], 0) assert res['automatic_pruning'] assert_equal(res['prune_target_size'], 576716800) assert_greater_than(res['size_on_disk'], 0) def _test_getchaintxstats(self): self.log.info("Test getchaintxstats") # Test `getchaintxstats` invalid extra parameters assert_raises_rpc_error( -1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0) # Test `getchaintxstats` invalid `nblocks` assert_raises_rpc_error( -1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '') assert_raises_rpc_error( -8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1) assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[ 0].getchaintxstats, self.nodes[0].getblockcount()) # Test `getchaintxstats` invalid `blockhash` assert_raises_rpc_error( -1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0) assert_raises_rpc_error( -5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0') blockhash = self.nodes[0].getblockhash(200) self.nodes[0].invalidateblock(blockhash) assert_raises_rpc_error( -8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash) self.nodes[0].reconsiderblock(blockhash) chaintxstats = self.nodes[0].getchaintxstats(1) # 200 txs plus genesis tx 
assert_equal(chaintxstats['txcount'], 201) # tx rate should be 1 per 10 minutes, or 1/600 # we have to round because of binary math assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1)) b1_hash = self.nodes[0].getblockhash(1) b1 = self.nodes[0].getblock(b1_hash) b200_hash = self.nodes[0].getblockhash(200) b200 = self.nodes[0].getblock(b200_hash) time_diff = b200['mediantime'] - b1['mediantime'] chaintxstats = self.nodes[0].getchaintxstats() assert_equal(chaintxstats['time'], b200['time']) assert_equal(chaintxstats['txcount'], 201) assert_equal(chaintxstats['window_final_block_hash'], b200_hash) assert_equal(chaintxstats['window_block_count'], 199) assert_equal(chaintxstats['window_tx_count'], 199) assert_equal(chaintxstats['window_interval'], time_diff) assert_equal( round(chaintxstats['txrate'] * time_diff, 10), Decimal(199)) chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash) assert_equal(chaintxstats['time'], b1['time']) assert_equal(chaintxstats['txcount'], 2) assert_equal(chaintxstats['window_final_block_hash'], b1_hash) assert_equal(chaintxstats['window_block_count'], 0) assert 'window_tx_count' not in chaintxstats assert 'window_interval' not in chaintxstats assert 'txrate' not in chaintxstats def _test_gettxoutsetinfo(self): node = self.nodes[0] res = node.gettxoutsetinfo() assert_equal(res['total_amount'], Decimal('8725.00000000')) assert_equal(res['transactions'], 200) assert_equal(res['height'], 200) assert_equal(res['txouts'], 200) assert_equal(res['bogosize'], 17000) assert_equal(res['bestblock'], node.getblockhash(200)) size = res['disk_size'] assert size > 6400 assert size < 64000 assert_equal(len(res['bestblock']), 64) assert_equal(len(res['hash_serialized']), 64) self.log.info( "Test that gettxoutsetinfo() works for blockchain with just the genesis block") b1hash = node.getblockhash(1) node.invalidateblock(b1hash) res2 = node.gettxoutsetinfo() assert_equal(res2['transactions'], 0) assert_equal(res2['total_amount'], Decimal('0')) assert_equal(res2['height'], 0) assert_equal(res2['txouts'], 0) assert_equal(res2['bogosize'], 0) assert_equal(res2['bestblock'], node.getblockhash(0)) assert_equal(len(res2['hash_serialized']), 64) self.log.info( "Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block") node.reconsiderblock(b1hash) res3 = node.gettxoutsetinfo() assert_equal(res['total_amount'], res3['total_amount']) assert_equal(res['transactions'], res3['transactions']) assert_equal(res['height'], res3['height']) assert_equal(res['txouts'], res3['txouts']) assert_equal(res['bogosize'], res3['bogosize']) assert_equal(res['bestblock'], res3['bestblock']) assert_equal(res['hash_serialized'], res3['hash_serialized']) def _test_getblockheader(self): node = self.nodes[0] assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "nonsense") besthash = node.getbestblockhash() secondbesthash = node.getblockhash(199) header = node.getblockheader(besthash) assert_equal(header['hash'], besthash) assert_equal(header['height'], 200) assert_equal(header['confirmations'], 1) assert_equal(header['previousblockhash'], secondbesthash) assert_is_hex_string(header['chainwork']) assert_is_hash_string(header['hash']) assert_is_hash_string(header['previousblockhash']) assert_is_hash_string(header['merkleroot']) assert_is_hash_string(header['bits'], length=None) assert isinstance(header['time'], int) assert isinstance(header['mediantime'], int) assert isinstance(header['nonce'], int) assert isinstance(header['version'], int) assert 
isinstance(int(header['versionHex'], 16), int) assert isinstance(header['difficulty'], Decimal) def _test_getdifficulty(self): difficulty = self.nodes[0].getdifficulty() # 1 hash in 2 should be valid, so difficulty should be 1/2**31 # binary => decimal => binary math is why we do this check assert abs(difficulty * 2**31 - 1) < 0.0001 def _test_getnetworkhashps(self): hashes_per_second = self.nodes[0].getnetworkhashps() # This should be 2 hashes every 10 minutes or 1/300 assert abs(hashes_per_second * 300 - 1) < 0.0001 def _test_stopatheight(self): assert_equal(self.nodes[0].getblockcount(), 200) self.nodes[0].generate(6) assert_equal(self.nodes[0].getblockcount(), 206) self.log.debug('Node should not stop at this height') assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3)) try: self.nodes[0].generate(1) except (ConnectionError, http.client.BadStatusLine): pass # The node already shut down before response self.log.debug('Node should stop at this height...') self.nodes[0].wait_until_stopped() self.start_node(0) assert_equal(self.nodes[0].getblockcount(), 207) def _test_getblock(self): # Checks for getblock verbose outputs node = self.nodes[0] getblockinfo = node.getblock(node.getblockhash(1), 2) gettransactioninfo = node.gettransaction(getblockinfo['tx'][0]['txid']) getblockheaderinfo = node.getblockheader(node.getblockhash(1), True) assert_equal(getblockinfo['hash'], gettransactioninfo['blockhash']) assert_equal( getblockinfo['confirmations'], gettransactioninfo['confirmations']) assert_equal(getblockinfo['height'], getblockheaderinfo['height']) assert_equal( getblockinfo['versionHex'], getblockheaderinfo['versionHex']) assert_equal(getblockinfo['version'], getblockheaderinfo['version']) assert_equal(getblockinfo['size'], 188) assert_equal( getblockinfo['merkleroot'], getblockheaderinfo['merkleroot']) # Verify transaction data by checking the hex values for tx in getblockinfo['tx']: getrawtransaction = node.getrawtransaction(tx['txid'], True) assert_equal(tx['hex'], getrawtransaction['hex']) assert_equal(getblockinfo['time'], getblockheaderinfo['time']) assert_equal( getblockinfo['mediantime'], getblockheaderinfo['mediantime']) assert_equal(getblockinfo['nonce'], getblockheaderinfo['nonce']) assert_equal(getblockinfo['bits'], getblockheaderinfo['bits']) assert_equal( getblockinfo['difficulty'], getblockheaderinfo['difficulty']) assert_equal( getblockinfo['chainwork'], getblockheaderinfo['chainwork']) assert_equal( getblockinfo['previousblockhash'], getblockheaderinfo['previousblockhash']) assert_equal( getblockinfo['nextblockhash'], getblockheaderinfo['nextblockhash']) def _test_waitforblockheight(self): self.log.info("Test waitforblockheight") node = self.nodes[0] node.add_p2p_connection(P2PInterface()) current_height = node.getblock(node.getbestblockhash())['height'] # Create a fork somewhere below our current height, invalidate the tip # of that fork, and then ensure that waitforblockheight still # works as expected. # # (Previously this was broken based on setting # `rpc/blockchain.cpp:latestblock` incorrectly.) 
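# Aside: where the 1/2**31 in _test_getdifficulty() above comes from.
# Difficulty is the ratio of the difficulty-1 target to the current target,
# and regtest's proof-of-work limit lets roughly one in two hashes through.
# A sketch under the standard nBits encodings (illustrative only):
def approx_regtest_difficulty():
    diff1_target = 0xffff * 2 ** 208       # target encoded by nBits 0x1d00ffff
    regtest_target = 0x7fffff * 2 ** 232   # target encoded by nBits 0x207fffff
    return diff1_target / regtest_target   # ~= 2 ** -31, as asserted above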
# b20hash = node.getblockhash(20) b20 = node.getblock(b20hash) def solve_and_send_block(prevhash, height, time): b = create_block(prevhash, create_coinbase(height), time) b.solve() node.p2p.send_message(msg_block(b)) node.p2p.sync_with_ping() return b b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1) b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1) node.invalidateblock(b22f.hash) def assert_waitforheight(height, timeout=2): assert_equal( node.waitforblockheight(height, timeout)['height'], current_height) assert_waitforheight(0) assert_waitforheight(current_height - 1) assert_waitforheight(current_height) assert_waitforheight(current_height + 1) if __name__ == '__main__': BlockchainTest().main() diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py index 43b613e19..7c4ac8235 100755 --- a/test/functional/rpc_decodescript.py +++ b/test/functional/rpc_decodescript.py @@ -1,216 +1,216 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test decoding scripts via decodescript RPC command.""" from test_framework.messages import CTransaction, FromHex, ToHex from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, hex_str_to_bytes class DecodeScriptTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def decodescript_script_sig(self): signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' push_signature = '48' + signature public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key # below are test cases for all of the standard transaction types # 1) P2PK scriptSig # the scriptSig of a public key scriptPubKey simply pushes a signature # onto the stack rpc_result = self.nodes[0].decodescript(push_signature) assert_equal(signature, rpc_result['asm']) # 2) P2PKH scriptSig rpc_result = self.nodes[0].decodescript( push_signature + push_public_key) assert_equal(signature + ' ' + public_key, rpc_result['asm']) # 3) multisig scriptSig # this also tests the leading portion of a P2SH multisig scriptSig # OP_0 rpc_result = self.nodes[0].decodescript( '00' + push_signature + push_signature) assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm']) # 4) P2SH scriptSig # an empty P2SH redeemScript is valid and makes for a very simple test case. # thus, such a spending scriptSig would just need to pass the outer redeemScript # hash test and leave true on the top of the stack. rpc_result = self.nodes[0].decodescript('5100') assert_equal('1 0', rpc_result['asm']) # 5) null data scriptSig - no such thing because null data scripts can not be spent. # thus, no test case for that standard transaction type is here. 
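# Aside: a minimal sketch of how the push-only scriptSigs tested above map to
# the "asm" strings decodescript returns, assuming plain direct pushes only
# (opcodes 0x01-0x4b); decode_pushes is illustrative, not a framework helper.
def decode_pushes(script_hex):
    script = bytes.fromhex(script_hex)
    asm = []
    i = 0
    while i < len(script):
        n = script[i]
        assert 0x01 <= n <= 0x4b, "sketch only handles direct pushes"
        asm.append(script[i + 1:i + 1 + n].hex())
        i += 1 + n
    return ' '.join(asm)

# decode_pushes(push_signature) reproduces the signature-only asm of case 1)
# above, and decode_pushes(push_signature + push_public_key) that of case 2).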
def decodescript_script_pub_key(self): public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777' push_public_key_hash = '14' + public_key_hash # below are test cases for all of the standard transaction types # 1) P2PK scriptPubKey # OP_CHECKSIG rpc_result = self.nodes[0].decodescript(push_public_key + 'ac') assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm']) # 2) P2PKH scriptPubKey # OP_DUP OP_HASH160 OP_EQUALVERIFY OP_CHECKSIG rpc_result = self.nodes[0].decodescript( '76a9' + push_public_key_hash + '88ac') assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm']) # 3) multisig scriptPubKey # OP_CHECKMULTISIG # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even # though it is unrealistic. rpc_result = self.nodes[0].decodescript( '52' + push_public_key + push_public_key + push_public_key + '53ae') assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm']) # 4) P2SH scriptPubKey # OP_HASH160 OP_EQUAL. # push_public_key_hash here should actually be the hash of a redeem script. # but this works the same for purposes of this test. rpc_result = self.nodes[0].decodescript( 'a9' + push_public_key_hash + '87') assert_equal( 'OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm']) # 5) null data scriptPubKey # use a signature look-alike here to make sure that we do not decode random data as a signature. # this matters if/when signature sighash decoding comes along. # would want to make sure that no such decoding takes place in this # case. signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' # OP_RETURN rpc_result = self.nodes[0].decodescript('6a' + signature_imposter) assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm']) # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here. # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY. # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even though it is unrealistic. # # OP_IF # OP_CHECKSIGVERIFY # OP_ELSE # OP_CHECKLOCKTIMEVERIFY OP_DROP # OP_ENDIF # OP_CHECKSIG # # lock until block 500,000 rpc_result = self.nodes[0].decodescript( '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac') assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm']) def decoderawtransaction_asm_sighashtype(self): """Test decoding scripts via RPC command "decoderawtransaction". This test is in with the "decodescript" tests because they are testing the same "asm" script decodes. 
""" # this test case uses a random plain vanilla mainnet transaction with a # single P2PKH input and output tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( '304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs. # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc # verify that we have not altered scriptPubKey decoding. tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( '8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) assert_equal( '0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) assert_equal( 'OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal( 'OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) txSave = FromHex(CTransaction(), tx) # make sure that a specifically crafted op_return value will not pass # all the IsDERSignature checks and then get decoded as a sighash type tx = 
'01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) # verify that we have not altered scriptPubKey processing even of a # specially crafted P2PKH pubkeyhash and P2SH redeem script hash that # is made to pass the der signature checks tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( 'OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal( 'OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) # some more full transaction tests of varying specific scriptSigs. used instead of # tests in decodescript_script_sig because the decodescript RPC is specifically # for working on scriptPubKeys (argh!). push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48 * 2 + 4)] signature = push_signature[2:] der_signature = signature[:-2] signature_sighash_decoded = der_signature + '[ALL]' signature_2 = der_signature + '82' push_signature_2 = '48' + signature_2 signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]' # 1) P2PK scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # make sure that the sighash decodes come out correctly for a more # complex / lesser used case. txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 2) multisig scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes( '00' + push_signature + push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 3) test a scriptSig that contains more than push operations. # in fact, it contains an OP_RETURN with data specially crafted to # cause improper decode if the code does not catch it. 
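# Aside: how the trailing sighash byte handled above turns into the bracketed
# asm suffix. 0x01 -> ALL, 0x02 -> NONE, 0x03 -> SINGLE, and the 0x80 bit adds
# |ANYONECANPAY, so the '82' appended to build signature_2 above decodes as
# [NONE|ANYONECANPAY]. A sketch (the 0x40 SIGHASH_FORKID bit used on this
# chain is deliberately ignored here):
def sighash_suffix(sighash_byte):
    base = {0x01: 'ALL', 0x02: 'NONE', 0x03: 'SINGLE'}[sighash_byte & 0x1f]
    if sighash_byte & 0x80:
        base += '|ANYONECANPAY'
    return '[' + base + ']'

# sighash_suffix(0x01) == '[ALL]'
# sighash_suffix(0x82) == '[NONE|ANYONECANPAY]'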
txSave.vin[0].scriptSig = hex_str_to_bytes( '6a143011020701010101010101020601010101010101') rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) def run_test(self): self.decodescript_script_sig() self.decodescript_script_pub_key() self.decoderawtransaction_asm_sighashtype() if __name__ == '__main__': DecodeScriptTest().main() diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py index f196eba6f..0046c7a4e 100755 --- a/test/functional/rpc_deprecated.py +++ b/test/functional/rpc_deprecated.py @@ -1,27 +1,27 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test deprecation of RPC calls.""" from test_framework.test_framework import BitcoinTestFramework class DeprecatedRpcTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True self.extra_args = [ [], []] def run_test(self): # This test should be used to verify correct behaviour of deprecated # RPC methods with and without the -deprecatedrpc flags. For example: # # self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses") # assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()]) # self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()]) self.log.info("No tested deprecated RPC methods") if __name__ == '__main__': DeprecatedRpcTest().main() diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py index d1f9ea50f..ea8baa075 100755 --- a/test/functional/rpc_fundrawtransaction.py +++ b/test/functional/rpc_fundrawtransaction.py @@ -1,795 +1,795 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from decimal import Decimal from test_framework.messages import CTransaction, FromHex from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_fee_amount, assert_greater_than, assert_greater_than_or_equal, assert_raises_rpc_error, connect_nodes_bi, find_vout_for_address, ) def get_unspent(listunspent, amount): for utx in listunspent: if utx['amount'] == amount: return utx raise AssertionError( 'Could not find unspent with amount={}'.format(amount)) class RawTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True self.extra_args = [[], [], [], []] def setup_network(self, split=False): self.setup_nodes() connect_nodes_bi(self.nodes[0], self.nodes[1]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[3]) def run_test(self): min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) # if the fee's positive delta is higher than this value tests will fail; # a negative delta always fails the tests. 
# The size of the signature of every input may be at most 2 bytes larger # than a minimum sized signature. # = 2 bytes * minRelayTxFeePerByte feeTolerance = 2 * min_relay_tx_fee / 1000 self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(121) self.sync_all() # ensure that setting changePosition in fundraw with an exact match is handled properly rawmatch = self.nodes[2].createrawtransaction( [], {self.nodes[2].getnewaddress(): 50}) rawmatch = self.nodes[2].fundrawtransaction( rawmatch, {"changePosition": 1, "subtractFeeFromOutputs": [0]}) assert_equal(rawmatch["changepos"], -1) watchonly_address = self.nodes[0].getnewaddress() watchonly_pubkey = self.nodes[ 0].getaddressinfo(watchonly_address)["pubkey"] watchonly_amount = Decimal(200) self.nodes[3].importpubkey(watchonly_pubkey, "", True) watchonly_txid = self.nodes[0].sendtoaddress( watchonly_address, watchonly_amount) # Lock UTXO so nodes[0] doesn't accidentally spend it watchonly_vout = find_vout_for_address( self.nodes[0], watchonly_txid, watchonly_address) self.nodes[0].lockunspent( False, [{"txid": watchonly_txid, "vout": watchonly_vout}]) self.nodes[0].sendtoaddress( self.nodes[3].getnewaddress(), watchonly_amount / 10) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) self.nodes[0].generate(1) self.sync_all() # # simple test # # inputs = [] outputs = {self.nodes[0].getnewaddress(): 1.0} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) # test that we have enough inputs assert len(dec_tx['vin']) > 0 # # simple test with two coins # # inputs = [] outputs = {self.nodes[0].getnewaddress(): 2.2} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) # test if we have enough inputs assert len(dec_tx['vin']) > 0 # # simple test with two coins # # inputs = [] outputs = {self.nodes[0].getnewaddress(): 2.6} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert len(dec_tx['vin']) > 0 assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') # # simple test with two outputs # # inputs = [] outputs = { self.nodes[0].getnewaddress(): 2.6, self.nodes[1].getnewaddress(): 2.5} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert len(dec_tx['vin']) > 0 assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') # # test a fundrawtransaction with a VIN greater than the required amount # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): 1.0} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) 
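# Aside: the units in the feeTolerance computed above. min_relay_tx_fee is a
# rate per kB, so dividing by 1000 yields a per-byte rate, and allowing every
# input's signature to be up to 2 bytes longer than a minimal one gives the
# 2-byte slack. A sketch (illustrative only):
def fee_tolerance(min_relay_fee_per_kb, signature_slack_bytes=2):
    return signature_slack_bytes * min_relay_fee_per_kb / 1000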
assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] # compare vin total and totalout+fee assert_equal(fee + totalOut, utx['amount']) # # test a fundrawtransaction which will not get a change output # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = { self.nodes[0].getnewaddress(): Decimal(5.0) - fee - feeTolerance} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(rawtxfund['changepos'], -1) assert_equal(fee + totalOut, utx['amount']) # compare vin total and totalout+fee # # test a fundrawtransaction with an invalid option # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[ 2].fundrawtransaction, rawTx, {'foo': 'bar'}) # # test a fundrawtransaction with an invalid change address # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_raises_rpc_error( -5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawTx, {'changeAddress': 'foobar'}) # # test a fundrawtransaction with a provided change address # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) change = self.nodes[2].getnewaddress() assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[ 2].fundrawtransaction, rawTx, {'changeAddress': change, 'changePosition': 2}) rawtxfund = self.nodes[2].fundrawtransaction( rawTx, {'changeAddress': change, 'changePosition': 0}) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) out = dec_tx['vout'][0] assert_equal(change, out['scriptPubKey']['addresses'][0]) # # test a fundrawtransaction with a VIN smaller than the required amount # # utx = get_unspent(self.nodes[2].listunspent(), 1) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): 1.0} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) # 4-byte version + 1-byte vin count + 36-byte prevout put the 1-byte script_len at hex offset 82 (2 hex chars per byte); # splice in "0100" so the empty scriptSig becomes the single byte 0x00 rawTx = rawTx[:82] + "0100" + rawTx[84:] dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = 
rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for i, out in enumerate(dec_tx['vout']): totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts += 1 else: assert_equal(i, rawtxfund['changepos']) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) # # test a fundrawtransaction with two VINs # # utx = get_unspent(self.nodes[2].listunspent(), 1) utx2 = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}, {'txid': utx2['txid'], 'vout': utx2['vout']}] outputs = {self.nodes[0].getnewaddress(): 6.0} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts += 1 assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) matchingIns = 0 for vinOut in dec_tx['vin']: for vinIn in inputs: if vinIn['txid'] == vinOut['txid']: matchingIns += 1 # we now must see two vins identical to vins given as params assert_equal(matchingIns, 2) # # test a fundrawtransaction with two VINs and two vOUTs # # utx = get_unspent(self.nodes[2].listunspent(), 1) utx2 = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}, {'txid': utx2['txid'], 'vout': utx2['vout']}] outputs = { self.nodes[0].getnewaddress(): 6.0, self.nodes[0].getnewaddress(): 1.0} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts += 1 assert_equal(matchingOuts, 2) assert_equal(len(dec_tx['vout']), 3) # # test a fundrawtransaction with invalid vin # # inputs = [ {'txid': "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout': 0}] # invalid vin! 
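# The txid above is well-formed but unknown to the node, so funding this transaction fails with "Insufficient funds" below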
outputs = {self.nodes[0].getnewaddress(): 1.0} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_raises_rpc_error( -4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawTx) # # compare fee of a standard pubkeyhash transaction inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance # # # compare fee of a standard pubkeyhash transaction with multiple # outputs inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.1, self.nodes[1].getnewaddress(): 1.2, self.nodes[1].getnewaddress(): 0.1, self.nodes[ 1].getnewaddress(): 1.3, self.nodes[1].getnewaddress(): 0.2, self.nodes[1].getnewaddress(): 0.3} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendmany("", outputs) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance # # # compare fee of a 2of2 multisig p2sh transaction # create 2of2 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[1].getaddressinfo(addr2) mSigObj = self.nodes[1].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] inputs = [] outputs = {mSigObj: 1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance # # # compare fee of a 4of5 multisig p2sh transaction # create 4of5 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr3 = self.nodes[1].getnewaddress() addr4 = self.nodes[1].getnewaddress() addr5 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[1].getaddressinfo(addr2) addr3Obj = self.nodes[1].getaddressinfo(addr3) addr4Obj = self.nodes[1].getaddressinfo(addr4) addr5Obj = self.nodes[1].getaddressinfo(addr5) mSigObj = self.nodes[1].addmultisigaddress( 4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address'] inputs = [] outputs = {mSigObj: 1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance # # # spend a 2of2 multisig transaction over fundraw # create 2of2 addr addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) 
mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] # send 1.2 BCH to msig addr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[1].generate(1) self.sync_all() oldBalance = self.nodes[1].getbalance() inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.1} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) fundedTx = self.nodes[2].fundrawtransaction(rawTx) signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex']) txId = self.nodes[2].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node1 assert_equal( oldBalance + Decimal('1.10000000'), self.nodes[1].getbalance()) # # locked wallet test self.stop_node(0) self.nodes[1].node_encrypt_wallet("test") self.stop_node(2) self.stop_node(3) self.start_nodes() # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) connect_nodes_bi(self.nodes[0], self.nodes[1]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[3]) # Again lock the watchonly UTXO or nodes[0] may spend it, because # lockunspent is memory-only and thus lost on restart self.nodes[0].lockunspent( False, [{"txid": watchonly_txid, "vout": watchonly_vout}]) self.sync_all() # drain the keypool self.nodes[1].getnewaddress() self.nodes[1].getrawchangeaddress() inputs = [] outputs = {self.nodes[0].getnewaddress(): 1.1} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) # fund a transaction that requires a new key for the change output # creating the key must be impossible because the wallet is locked assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawTx) # refill the keypool self.nodes[1].walletpassphrase("test", 100) # need to refill the keypool to get an internal change address self.nodes[1].keypoolrefill(8) self.nodes[1].walletlock() assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[ 1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2) oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress(): 1.1} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) # now we need to unlock self.nodes[1].walletpassphrase("test", 600) signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(signedTx['hex']) self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node0 (the 1.1 sent plus a newly matured 50 BCH coinbase) assert_equal( oldBalance + Decimal('51.10000000'), self.nodes[0].getbalance()) # # multiple (~19) inputs tx test | Compare fee # # # empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress( self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0, 20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() # fund a tx with ~20 small inputs inputs = [] outputs = { self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[1].sendmany("", outputs) signedFee = 
self.nodes[1].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) # ~19 inputs assert feeDelta >= 0 and feeDelta <= feeTolerance * 19 # # multiple (~19) inputs tx test | sign/send # # # again, empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress( self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0, 20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() # fund a tx with ~20 small inputs oldBalance = self.nodes[0].getbalance() inputs = [] outputs = { self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet( fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(oldBalance + Decimal('50.19000000'), self.nodes[0].getbalance()) # 0.19+block reward # # test fundrawtransaction with OP_RETURN and no vin # # rawTx = "0100000000010000000000000000066a047465737400000000" dec_tx = self.nodes[2].decoderawtransaction(rawTx) assert_equal(len(dec_tx['vin']), 0) assert_equal(len(dec_tx['vout']), 1) rawtxfund = self.nodes[2].fundrawtransaction(rawTx) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_greater_than(len(dec_tx['vin']), 0) # at least one vin assert_equal(len(dec_tx['vout']), 2) # one change output added # # test a fundrawtransaction using only watchonly # # inputs = [] outputs = {self.nodes[2].getnewaddress(): watchonly_amount / 2} rawTx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction( rawTx, {'includeWatching': True}) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 1) assert_equal(res_dec["vin"][0]["txid"], watchonly_txid) assert "fee" in result.keys() assert_greater_than(result["changepos"], -1) # # test fundrawtransaction using the entirety of watched funds # # inputs = [] outputs = {self.nodes[2].getnewaddress(): watchonly_amount} rawTx = self.nodes[3].createrawtransaction(inputs, outputs) # Backward compatibility test (2nd param is includeWatching) result = self.nodes[3].fundrawtransaction(rawTx, True) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 2) assert res_dec["vin"][0]["txid"] == watchonly_txid or res_dec[ "vin"][1]["txid"] == watchonly_txid assert_greater_than(result["fee"], 0) assert_greater_than(result["changepos"], -1) assert_equal(result["fee"] + res_dec["vout"][ result["changepos"]]["value"], watchonly_amount / 10) signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"]) assert not signedtx["complete"] signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"]) assert signedtx["complete"] self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) self.sync_all() # # Test feeRate option # # # Make sure there is exactly one input so coin selection can't skew the # result assert_equal(len(self.nodes[3].listunspent(1)), 1) inputs = [] outputs = {self.nodes[3].getnewaddress(): 1} rawTx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction( rawTx) # uses min_relay_tx_fee (set by settxfee) result2 = self.nodes[3].fundrawtransaction( 
rawTx, {"feeRate": 2 * min_relay_tx_fee}) result_fee_rate = result['fee'] * 1000 / \ FromHex(CTransaction(), result['hex']).billable_size() assert_fee_amount( result2['fee'], FromHex(CTransaction(), result2['hex']).billable_size(), 2 * result_fee_rate) result3 = self.nodes[3].fundrawtransaction( rawTx, {"feeRate": 10 * min_relay_tx_fee}) # allow this transaction to be underfunded by 10 bytes. This is due # to the first transaction possibly being overfunded by up to .9 # satoshi due to fee ceilings being used. assert_fee_amount( result3['fee'], FromHex(CTransaction(), result3['hex']).billable_size(), 10 * result_fee_rate, 10) # # Test no address reuse occurs # # result3 = self.nodes[3].fundrawtransaction(rawTx) res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) changeaddress = "" for out in res_dec['vout']: if out['value'] > 1.0: changeaddress += out['scriptPubKey']['addresses'][0] assert changeaddress != "" nextaddr = self.nodes[3].getnewaddress() # Now the change address key should be removed from the keypool assert changeaddress != nextaddr # # Test subtractFeeFromOutputs option # # # Make sure there is exactly one input so coin selection can't skew the # result assert_equal(len(self.nodes[3].listunspent(1)), 1) inputs = [] outputs = {self.nodes[2].getnewaddress(): 1} rawTx = self.nodes[3].createrawtransaction(inputs, outputs) result = [self.nodes[3].fundrawtransaction(rawTx), # uses min_relay_tx_fee (set by settxfee) self.nodes[3].fundrawtransaction( rawTx, {"subtractFeeFromOutputs": []}), # empty subtraction list self.nodes[3].fundrawtransaction( rawTx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee) self.nodes[3].fundrawtransaction( rawTx, {"feeRate": 2 * min_relay_tx_fee}), self.nodes[3].fundrawtransaction(rawTx, {"feeRate": 2 * min_relay_tx_fee, "subtractFeeFromOutputs": [0]})] dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result] output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) assert_equal(result[3]['fee'], result[4]['fee']) assert_equal(change[0], change[1]) assert_equal(output[0], output[1]) assert_equal(output[0], output[2] + result[2]['fee']) assert_equal(change[0] + result[0]['fee'], change[2]) assert_equal(output[3], output[4] + result[4]['fee']) assert_equal(change[3] + result[3]['fee'], change[4]) inputs = [] outputs = { self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)} rawTx = self.nodes[3].createrawtransaction(inputs, outputs) result = [self.nodes[3].fundrawtransaction(rawTx), # split the fee between outputs 0, 2, and 3, but not output 1 self.nodes[3].fundrawtransaction(rawTx, {"subtractFeeFromOutputs": [0, 2, 3]})] dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']), self.nodes[3].decoderawtransaction(result[1]['hex'])] # Nested list of non-change output amounts for each transaction output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']] for d, r in zip(dec_tx, result)] # List of differences in output amounts between normal and subtractFee # transactions share = [o0 - o1 for o0, o1 in zip(output[0], output[1])] # output 1 is the same in both transactions assert_equal(share[1], 0) # the other 3 outputs are smaller as a result of subtractFeeFromOutputs assert_greater_than(share[0], 0) assert_greater_than(share[2], 0) assert_greater_than(share[3], 0) # outputs 2 and 3 take the 
same share of the fee assert_equal(share[2], share[3]) # output 0 takes at least as much share of the fee, and no more than 2 # satoshis more, than outputs 2 and 3 assert_greater_than_or_equal(share[0], share[2]) assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0]) # the fee is the same in both transactions assert_equal(result[0]['fee'], result[1]['fee']) # the total subtracted from the outputs is equal to the fee assert_equal(share[0] + share[2] + share[3], result[0]['fee']) if __name__ == '__main__': RawTransactionsTest().main() diff --git a/test/functional/rpc_getblockstats.py b/test/functional/rpc_getblockstats.py index 99e8daba3..3ed9120ae 100755 --- a/test/functional/rpc_getblockstats.py +++ b/test/functional/rpc_getblockstats.py @@ -1,204 +1,204 @@ #!/usr/bin/env python3 -# Copyright (c) 2017-2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test getblockstats rpc call # import decimal import json import os import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, ) TESTSDIR = os.path.dirname(os.path.realpath(__file__)) def EncodeDecimal(o): if isinstance(o, decimal.Decimal): # json.load will read a quoted float as a string and not convert it back # to decimal, so store the value as unquoted float instead. return float(o) raise TypeError(repr(o) + " is not JSON serializable") class GetblockstatsTest(BitcoinTestFramework): start_height = 101 max_stat_pos = 2 STATS_NEED_TXINDEX = [ 'avgfee', 'avgfeerate', 'maxfee', 'maxfeerate', 'medianfee', 'medianfeerate', 'minfee', 'minfeerate', 'totalfee', 'utxo_size_inc', ] def add_options(self, parser): parser.add_argument('--gen-test-data', dest='gen_test_data', default=False, action='store_true', help='Generate test data') parser.add_argument('--test-data', dest='test_data', default='data/rpc_getblockstats.json', action='store', metavar='FILE', help='Test data file') def set_test_params(self): self.num_nodes = 2 self.extra_args = [['-txindex'], ['-paytxfee=0.003']] self.setup_clean_chain = True def get_stats(self): return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos + 1)] def generate_test_data(self, filename): mocktime = time.time() self.nodes[0].generate(101) self.nodes[0].sendtoaddress( address=self.nodes[1].getnewaddress(), amount=10, subtractfeefromamount=True) self.nodes[0].generate(1) self.sync_all() self.nodes[0].sendtoaddress( address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=True) self.nodes[0].sendtoaddress( address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=False) self.nodes[1].sendtoaddress( address=self.nodes[0].getnewaddress(), amount=1, subtractfeefromamount=True) self.sync_all() self.nodes[0].generate(1) self.expected_stats = self.get_stats() blocks = [] tip = self.nodes[0].getbestblockhash() blockhash = None height = 0 while tip != blockhash: blockhash = self.nodes[0].getblockhash(height) blocks.append(self.nodes[0].getblock(blockhash, 0)) height += 1 to_dump = { 'blocks': blocks, 'mocktime': int(mocktime), 'stats': self.expected_stats, } with open(filename, 'w', encoding="utf8") as f: json.dump(to_dump, f, sort_keys=True, indent=2, default=EncodeDecimal) def load_test_data(self, filename): with open(filename, 'r', encoding="utf8") as f: d = json.load(f, 
parse_float=decimal.Decimal) blocks = d['blocks'] mocktime = d['mocktime'] self.expected_stats = d['stats'] self.log.info(self.expected_stats) # Set the timestamps from the file so that the nodes can get out of Initial Block Download self.nodes[0].setmocktime(mocktime) self.nodes[1].setmocktime(mocktime) for i, b in enumerate(blocks): self.nodes[0].submitblock(b) def run_test(self): test_data = os.path.join(TESTSDIR, self.options.test_data) if self.options.gen_test_data: self.generate_test_data(test_data) else: self.load_test_data(test_data) self.sync_all() stats = self.get_stats() expected_stats_noindex = [] for stat_row in stats: expected_stats_noindex.append( {k: v for k, v in stat_row.items() if k not in self.STATS_NEED_TXINDEX}) # Make sure all valid statistics are included but nothing else is expected_keys = self.expected_stats[0].keys() assert_equal(set(stats[0].keys()), set(expected_keys)) assert_equal(stats[0]['height'], self.start_height) assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos) for i in range(self.max_stat_pos + 1): self.log.info('Checking block {}\n'.format(i)) assert_equal(stats[i], self.expected_stats[i]) # Check selecting block by hash too blockhash = self.expected_stats[i]['blockhash'] stats_by_hash = self.nodes[0].getblockstats( hash_or_height=blockhash) assert_equal(stats_by_hash, self.expected_stats[i]) # Check with the node that has no txindex stats_no_txindex = self.nodes[1].getblockstats( hash_or_height=blockhash, stats=list(expected_stats_noindex[i].keys())) assert_equal(stats_no_txindex, expected_stats_noindex[i]) # Make sure each stat can be queried on its own for stat in expected_keys: for i in range(self.max_stat_pos + 1): result = self.nodes[0].getblockstats( hash_or_height=self.start_height + i, stats=[stat]) assert_equal(list(result.keys()), [stat]) if result[stat] != self.expected_stats[i][stat]: self.log.info('result[{}] ({}) failed, {!r} != {!r}'.format( stat, i, result[stat], self.expected_stats[i][stat])) assert_equal(result[stat], self.expected_stats[i][stat]) # Make sure only the selected statistics are included (more than one) some_stats = {'minfee', 'maxfee'} stats = self.nodes[0].getblockstats( hash_or_height=1, stats=list(some_stats)) assert_equal(set(stats.keys()), some_stats) # Test invalid parameters raise the proper json exceptions tip = self.start_height + self.max_stat_pos assert_raises_rpc_error(-8, 'Target block height {} after current tip {}'.format( tip + 1, tip), self.nodes[0].getblockstats, hash_or_height=tip + 1) assert_raises_rpc_error(-8, 'Target block height {} is negative'.format(-1), self.nodes[0].getblockstats, hash_or_height=-1) # Make sure invalid stats are rejected inv_sel_stat = 'asdfghjkl' inv_stats = [ [inv_sel_stat], ['minfee', inv_sel_stat], [inv_sel_stat, 'minfee'], ['minfee', inv_sel_stat, 'maxfee'], ] for inv_stat in inv_stats: assert_raises_rpc_error(-8, 'Invalid selected statistic {}'.format( inv_sel_stat), self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat) # Make sure we aren't always returning inv_sel_stat as the culprit stat assert_raises_rpc_error(-8, 'Invalid selected statistic aaa{}'.format(inv_sel_stat), self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee', 'aaa{}'.format(inv_sel_stat)]) assert_raises_rpc_error(-8, 'One or more of the selected stats requires -txindex enabled', self.nodes[1].getblockstats, hash_or_height=self.start_height + self.max_stat_pos) # Mainchain's genesis block shouldn't be found on regtest 
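# (The hash queried below is the well-known mainnet genesis block hash, which no regtest node has)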
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats, hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f') if __name__ == '__main__': GetblockstatsTest().main() diff --git a/test/functional/rpc_listtransactions.py b/test/functional/rpc_listtransactions.py index f523b3506..a0cb1dff6 100755 --- a/test/functional/rpc_listtransactions.py +++ b/test/functional/rpc_listtransactions.py @@ -1,97 +1,97 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listtransactions API.""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_array_result class ListTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): # Leave IBD self.nodes[0].generate(1) # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid}, {"category": "send", "account": "", "amount": Decimal("-0.1"), "confirmations": 0}) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid}, {"category": "receive", "account": "", "amount": Decimal("0.1"), "confirmations": 0}) # mine a block, confirmations should change: self.nodes[0].generate(1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid}, {"category": "send", "account": "", "amount": Decimal("-0.1"), "confirmations": 1}) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid}, {"category": "receive", "account": "", "amount": Decimal("0.1"), "confirmations": 1}) # send-to-self: txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid, "category": "send"}, {"amount": Decimal("-0.2")}) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid, "category": "receive"}, {"amount": Decimal("0.2")}) # sendmany from node1: twice to self, twice to node2: send_to = {self.nodes[0].getnewaddress(): 0.11, self.nodes[1].getnewaddress(): 0.22, self.nodes[0].getaccountaddress("from1"): 0.33, self.nodes[1].getaccountaddress("toself"): 0.44} txid = self.nodes[1].sendmany("", send_to) self.sync_all() assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.11")}, {"txid": txid}) assert_array_result(self.nodes[0].listtransactions(), {"category": "receive", "amount": Decimal("0.11")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.22")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "receive", "amount": Decimal("0.22")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.33")}, {"txid": txid}) assert_array_result(self.nodes[0].listtransactions(), {"category": "receive", "amount": Decimal("0.33")}, {"txid": txid, "account": "from1"}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.44")}, {"txid": txid, "account": ""}) assert_array_result(self.nodes[1].listtransactions(), {"category": "receive", "amount": Decimal("0.44")}, {"txid": txid, "account": "toself"}) pubkey = 
self.nodes[1].getaddressinfo( self.nodes[1].getnewaddress())['pubkey'] multisig = self.nodes[1].createmultisig(1, [pubkey]) self.nodes[0].importaddress( multisig["redeemScript"], "watchonly", False, True) txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) self.nodes[1].generate(1) self.sync_all() assert len(self.nodes[0].listtransactions( "watchonly", 100, 0, False)) == 0 assert_array_result( self.nodes[0].listtransactions("watchonly", 100, 0, True), {"category": "receive", "amount": Decimal("0.1")}, {"txid": txid, "account": "watchonly"}) if __name__ == '__main__': ListTransactionsTest().main() diff --git a/test/functional/rpc_named_arguments.py b/test/functional/rpc_named_arguments.py index 14a0fe5c6..85dfe318c 100755 --- a/test/functional/rpc_named_arguments.py +++ b/test/functional/rpc_named_arguments.py @@ -1,35 +1,35 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test using named arguments for RPCs.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class NamedArgumentTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def run_test(self): node = self.nodes[0] h = node.help(command='getblockchaininfo') assert h.startswith('getblockchaininfo\n') assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo') h = node.getblockhash(height=0) node.getblock(blockhash=h) assert_equal(node.echo(), []) assert_equal(node.echo(arg0=0, arg9=9), [0] + [None] * 8 + [9]) assert_equal(node.echo(arg1=1), [None, 1]) assert_equal(node.echo(arg9=None), [None] * 10) assert_equal(node.echo(arg0=0, arg3=3, arg9=9), [0] + [None] * 2 + [3] + [None] * 5 + [9]) if __name__ == '__main__': NamedArgumentTest().main() diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py index ab6edc894..f6e7fd763 100755 --- a/test/functional/rpc_preciousblock.py +++ b/test/functional/rpc_preciousblock.py @@ -1,124 +1,124 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the preciousblock RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, sync_blocks, ) def unidirectional_node_sync_via_rpc(node_src, node_dest): blocks_to_copy = [] blockhash = node_src.getbestblockhash() while True: try: assert len(node_dest.getblock(blockhash, False)) > 0 break except: blocks_to_copy.append(blockhash) blockhash = node_src.getblockheader( blockhash, True)['previousblockhash'] blocks_to_copy.reverse() for blockhash in blocks_to_copy: blockdata = node_src.getblock(blockhash, False) assert node_dest.submitblock(blockdata) in (None, 'inconclusive') def node_sync_via_rpc(nodes): for node_src in nodes: for node_dest in nodes: if node_src is node_dest: continue unidirectional_node_sync_via_rpc(node_src, node_dest) class PreciousTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], ["-noparkdeepreorg"]] def setup_network(self): self.setup_nodes() def run_test(self): self.log.info( "Ensure submitblock can in principle reorg to a competing chain") self.nodes[0].generate(1) assert_equal(self.nodes[0].getblockcount(), 1) hashZ = self.nodes[1].generate(2)[-1] assert_equal(self.nodes[1].getblockcount(), 2) node_sync_via_rpc(self.nodes[0:3]) assert_equal(self.nodes[0].getbestblockhash(), hashZ) self.log.info("Mine blocks A-B-C on Node 0") hashC = self.nodes[0].generate(3)[-1] assert_equal(self.nodes[0].getblockcount(), 5) self.log.info("Mine competing blocks E-F-G on Node 1") hashG = self.nodes[1].generate(3)[-1] assert_equal(self.nodes[1].getblockcount(), 5) assert hashC != hashG self.log.info("Connect nodes and check no reorg occurs") # Submit competing blocks via RPC so any reorg should occur before we # proceed (no way to wait on inaction for p2p sync) node_sync_via_rpc(self.nodes[0:2]) connect_nodes_bi(self.nodes[0], self.nodes[1]) assert_equal(self.nodes[0].getbestblockhash(), hashC) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block C again") self.nodes[0].preciousblock(hashC) assert_equal(self.nodes[0].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block C") self.nodes[1].preciousblock(hashC) # wait because node 1 may not have downloaded hashC sync_blocks(self.nodes[0:2]) assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block G again") self.nodes[1].preciousblock(hashG) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G again") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node1 prefer block C again") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info( "Mine another block (E-F-G-)H on Node 0 and reorg Node 1") self.nodes[0].generate(1) assert_equal(self.nodes[0].getblockcount(), 6) sync_blocks(self.nodes[0:2]) hashH = self.nodes[0].getbestblockhash() assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Node1 should not be able to prefer block C anymore") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Mine competing blocks I-J-K-L on Node 2") self.nodes[2].generate(4) assert_equal(self.nodes[2].getblockcount(), 6) hashL = 
self.nodes[2].getbestblockhash() self.log.info("Connect nodes and check no reorg occurs") node_sync_via_rpc(self.nodes[1:3]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) assert_equal(self.nodes[0].getbestblockhash(), hashH) assert_equal(self.nodes[1].getbestblockhash(), hashH) assert_equal(self.nodes[2].getbestblockhash(), hashL) self.log.info("Make Node1 prefer block L") self.nodes[1].preciousblock(hashL) assert_equal(self.nodes[1].getbestblockhash(), hashL) self.log.info("Make Node2 prefer block H") self.nodes[2].preciousblock(hashH) assert_equal(self.nodes[2].getbestblockhash(), hashH) if __name__ == '__main__': PreciousTest().main() diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index 0ef394f70..1205dcc0a 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -1,496 +1,496 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the rawtransaction RPCs. Test the following RPCs: - createrawtransaction - signrawtransactionwithwallet - sendrawtransaction - decoderawtransaction - getrawtransaction """ from decimal import Decimal from collections import OrderedDict from io import BytesIO from test_framework.messages import CTransaction, ToHex from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_raw_tx from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes_bi, hex_str_to_bytes, ) class multidict(dict): """Dictionary that allows duplicate keys. Constructed with a list of (key, value) tuples. 
When dumped by the json module, will output invalid json with repeated keys, e.g.: >>> json.dumps(multidict([(1,2),(1,2)])) '{"1": 2, "1": 2}' Used to test calls to rpc methods with repeated keys in the json object.""" def __init__(self, x): dict.__init__(self, x) self.x = x def items(self): return self.x # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self, split=False): super().setup_network() connect_nodes_bi(self.nodes[0], self.nodes[2]) def run_test(self): self.log.info( 'prepare some coins for multiple *rawtransaction commands') self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(101) self.sync_all() self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) self.sync_all() self.nodes[0].generate(5) self.sync_all() self.log.info( 'Test getrawtransaction on genesis block coinbase returns an error') block = self.nodes[0].getblock(self.nodes[0].getblockhash(0)) assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot']) self.log.info( 'Check parameter types and required parameters of createrawtransaction') # Test `createrawtransaction` required parameters assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction) assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, []) # Test `createrawtransaction` invalid extra parameters assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, 'foo') # Test `createrawtransaction` invalid `inputs` txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {}) assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {}) assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {}) assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {}) assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {}) assert_raises_rpc_error(-8, "Invalid parameter, vout must be a number", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {}) assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {}) assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {}) # Test `createrawtransaction` invalid `outputs` address = self.nodes[0].getnewaddress() address2 = self.nodes[0].getnewaddress() assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo') # Should not throw for backwards compatibility self.nodes[0].createrawtransaction(inputs=[], outputs={}) self.nodes[0].createrawtransaction(inputs=[], outputs=[]) assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'}) assert_raises_rpc_error(-5, "Invalid Bitcoin address", 
self.nodes[0].createrawtransaction, [], {'foo': 0}) assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'}) assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1}) assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format( address), self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)])) assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format( address), self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}]) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}]) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']]) # Test `createrawtransaction` invalid `locktime` assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo') assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1) assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296) self.log.info( 'Check that createrawtransaction accepts an array and object as outputs') tx = CTransaction() # One output tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction( inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99})))) assert_equal(len(tx.vout), 1) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction( inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]), ) # Two outputs tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)]))))) assert_equal(len(tx.vout), 2) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {address: 99}, {address2: 99}]), ) # Two data outputs tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=multidict([('data', '99'), ('data', '99')]))))) assert_equal(len(tx.vout), 2) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {'data': '99'}, {'data': '99'}]), ) # Multiple mixed outputs tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), ('data', '99'), ('data', '99')]))))) assert_equal(len(tx.vout), 3) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {address: 99}, {'data': '99'}, {'data': '99'}]), ) self.log.info('sendrawtransaction with missing input') # this input won't exist inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1}] outputs = {self.nodes[0].getnewaddress(): 4.998} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) rawtx = pad_raw_tx(rawtx) rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx) # This will raise an exception since there are missing inputs assert_raises_rpc_error( -25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex']) ##################################### # getrawtransaction with block hash # ##################################### # make a tx by sending then generate 2 
blocks; block1 has the tx in it tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1) block1, block2 = self.nodes[2].generate(2) self.sync_all() # We should be able to get the raw transaction by providing the correct block gottx = self.nodes[0].getrawtransaction(tx, True, block1) assert_equal(gottx['txid'], tx) assert_equal(gottx['in_active_chain'], True) # We should not have the 'in_active_chain' flag when we don't provide a block gottx = self.nodes[0].getrawtransaction(tx, True) assert_equal(gottx['txid'], tx) assert 'in_active_chain' not in gottx # We should not get the tx if we provide an unrelated block assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2) # An invalid block hash should raise the correct errors assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True) assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar") assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234") assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000") # Undo the blocks and check in_active_chain self.nodes[0].invalidateblock(block1) gottx = self.nodes[0].getrawtransaction( txid=tx, verbose=True, blockhash=block1) assert_equal(gottx['in_active_chain'], False) self.nodes[0].reconsiderblock(block1) assert_equal(self.nodes[0].getbestblockhash(), block2) # # RAW TX MULTISIG TESTS # # # 2of2 test addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) # Tests for createmultisig and addmultisigaddress assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"]) # createmultisig can only take public keys self.nodes[0].createmultisig( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here. 
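# To make the pubkey-vs-address distinction concrete, a minimal sketch
# (assuming `node` is any wallet-enabled test node; illustrative only):
#
#   addr = node.getnewaddress()
#   pubkey = node.getaddressinfo(addr)['pubkey']
#   node.createmultisig(1, [pubkey])      # accepted: a hex-encoded public key
#   node.createmultisig(1, [addr])        # raises -5 "Invalid public key"
#   node.addmultisigaddress(1, [addr])    # accepted: the wallet resolves its own address to a key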
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr1])['address'] # use balance deltas instead of absolute values bal = self.nodes[2].getbalance() # send 1.2 BCH to msig adr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[0].generate(1) self.sync_all() # node2 has both keys of the 2of2 ms addr., tx should affect the # balance assert_equal(self.nodes[2].getbalance(), bal + Decimal('1.20000000')) # 2of3 test from different nodes bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr3 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) addr3Obj = self.nodes[2].getaddressinfo(addr3) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address'] txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() # THIS IS AN INCOMPLETE FEATURE # NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND # COUNT AT BALANCE CALCULATION # for now, assume the funds of a 2of3 multisig tx are not marked as # spendable assert_equal(self.nodes[2].getbalance(), bal) txDetails = self.nodes[0].gettransaction(txId, True) rawTx = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = False for outpoint in rawTx['vout']: if outpoint['value'] == Decimal('2.20000000'): vout = outpoint break bal = self.nodes[0].getbalance() inputs = [{ "txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "amount": vout['value'], }] outputs = {self.nodes[0].getnewaddress(): 2.19} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet( rawTx, inputs) # node1 only has one key, can't comp. 
sign the tx assert_equal(rawTxPartialSigned['complete'], False) rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs) # node2 can sign the tx completely, it owns two of three keys assert_equal(rawTxSigned['complete'], True) self.nodes[2].sendrawtransaction(rawTxSigned['hex']) rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance(), bal + Decimal( '50.00000000') + Decimal('2.19000000')) # block reward + tx rawTxBlock = self.nodes[0].getblock(self.nodes[0].getbestblockhash()) # 2of2 test for combining transactions bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) self.nodes[1].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] mSigObjValid = self.nodes[2].getaddressinfo(mSigObj) txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() # the funds of a 2of2 multisig tx should not be marked as spendable assert_equal(self.nodes[2].getbalance(), bal) txDetails = self.nodes[0].gettransaction(txId, True) rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = False for outpoint in rawTx2['vout']: if outpoint['value'] == Decimal('2.20000000'): vout = outpoint break bal = self.nodes[0].getbalance() inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey'] ['hex'], "redeemScript": mSigObjValid['hex'], "amount": vout['value']}] outputs = {self.nodes[0].getnewaddress(): 2.19} rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet( rawTx2, inputs) self.log.debug(rawTxPartialSigned1) # node1 only has one key, can't completely sign the tx assert_equal(rawTxPartialSigned1['complete'], False) rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet( rawTx2, inputs) self.log.debug(rawTxPartialSigned2) # node2 only has one key, can't completely sign the tx assert_equal(rawTxPartialSigned2['complete'], False) rawTxComb = self.nodes[2].combinerawtransaction( [rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']]) self.log.debug(rawTxComb) self.nodes[2].sendrawtransaction(rawTxComb) rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance( ), bal + Decimal('50.00000000') + Decimal('2.19000000')) # block reward + tx # getrawtransaction tests # 1. valid parameters - only supply txid txHash = rawTx["hash"] assert_equal( self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex']) # 2. valid parameters - supply txid and 0 for non-verbose assert_equal( self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex']) # 3. valid parameters - supply txid and False for non-verbose assert_equal(self.nodes[0].getrawtransaction( txHash, False), rawTxSigned['hex']) # 4. valid parameters - supply txid and 1 for verbose. # We only check the "hex" field of the output so we don't need to # update this test every time the output format changes. assert_equal(self.nodes[0].getrawtransaction( txHash, 1)["hex"], rawTxSigned['hex']) # 5. 
valid parameters - supply txid and True for verbose assert_equal(self.nodes[0].getrawtransaction( txHash, True)["hex"], rawTxSigned['hex']) # 6. invalid parameters - supply txid and string "False" assert_raises_rpc_error( -1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "False") # 7. invalid parameters - supply txid and empty array assert_raises_rpc_error( -1, "not a boolean", self.nodes[0].getrawtransaction, txHash, []) # 8. invalid parameters - supply txid and empty dict assert_raises_rpc_error( -1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {}) # Sanity checks on verbose getrawtransaction output rawTxOutput = self.nodes[0].getrawtransaction(txHash, True) assert_equal(rawTxOutput["hex"], rawTxSigned["hex"]) assert_equal(rawTxOutput["txid"], txHash) assert_equal(rawTxOutput["hash"], txHash) assert_greater_than(rawTxOutput["size"], 300) assert_equal(rawTxOutput["version"], 0x02) assert_equal(rawTxOutput["locktime"], 0) assert_equal(len(rawTxOutput["vin"]), 1) assert_equal(len(rawTxOutput["vout"]), 1) assert_equal(rawTxOutput["blockhash"], rawTxBlock["hash"]) assert_equal(rawTxOutput["confirmations"], 3) assert_equal(rawTxOutput["time"], rawTxBlock["time"]) assert_equal(rawTxOutput["blocktime"], rawTxBlock["time"]) inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'sequence': 1000}] outputs = {self.nodes[0].getnewaddress(): 1} assert_raises_rpc_error( -8, 'Invalid parameter, missing vout key', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = "1" assert_raises_rpc_error( -8, 'Invalid parameter, vout must be a number', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = -1 assert_raises_rpc_error( -8, 'Invalid parameter, vout must be positive', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = 1 rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 1000) # 9. invalid parameters - sequence number out of range inputs[0]['sequence'] = -1 assert_raises_rpc_error( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) # 10. invalid parameters - sequence number out of range inputs[0]['sequence'] = 4294967296 assert_raises_rpc_error( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['sequence'] = 4294967294 rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) #################################### # TRANSACTION VERSION NUMBER TESTS # #################################### # Test the minimum transaction version number that fits in a signed 32-bit integer. tx = CTransaction() tx.nVersion = -0x80000000 rawtx = ToHex(tx) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['version'], -0x80000000) # Test the maximum transaction version number that fits in a signed 32-bit integer. 
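# (0x7fffffff == 2**31 - 1 == 2147483647, mirroring the -0x80000000 == -2**31 minimum tested above)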
tx = CTransaction() tx.nVersion = 0x7fffffff rawtx = ToHex(tx) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['version'], 0x7fffffff) if __name__ == '__main__': RawTransactionsTest().main() diff --git a/test/functional/rpc_signmessage.py b/test/functional/rpc_signmessage.py index 7c08600ff..4cec0eab2 100755 --- a/test/functional/rpc_signmessage.py +++ b/test/functional/rpc_signmessage.py @@ -1,42 +1,42 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test RPC commands for signing and verifying messages.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class SignMessagesTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): message = 'This is just a test message' self.log.info('test signing with priv_key') priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N' address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB' expected_signature = 'IPDOIFcWd8LzOr70CXaal4+uG2ZZWcbHqutyGeO7AJ0MWbqq9C+u3KP9ScjtLzsIgY3st5n8XFQvgMZ0KrDQ9vg=' signature = self.nodes[0].signmessagewithprivkey(priv_key, message) assert_equal(expected_signature, signature) assert self.nodes[0].verifymessage(address, signature, message) self.log.info('test signing with an address with wallet') address = self.nodes[0].getnewaddress() signature = self.nodes[0].signmessage(address, message) assert self.nodes[0].verifymessage(address, signature, message) self.log.info('test verifying with another address should not work') other_address = self.nodes[0].getnewaddress() other_signature = self.nodes[0].signmessage(other_address, message) assert not self.nodes[0].verifymessage( other_address, signature, message) assert not self.nodes[0].verifymessage( address, other_signature, message) if __name__ == '__main__': SignMessagesTest().main() diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index 84c056407..a6e79ea08 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -1,239 +1,239 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test transaction signing using the signrawtransaction* RPCs.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error RPC_WALLET_NOT_SPECIFIED = "Wallet file not specified (must request wallet " + \ "RPC through /wallet/ uri-path)." class SignRawTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ["-wallet=w1", "-wallet=w2"]] def successful_signing_test(self): """Creates and signs a valid raw transaction with one input. 
Expected results: 1) The transaction has a complete set of signatures 2) No script verification error occurred""" privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA'] inputs = [ # Valid pay-to-pubkey scripts {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 3.14159, 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0, 'amount': '123.456', 'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'}, ] outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) rawTxSigned = self.nodes[0].signrawtransactionwithkey( rawTx, privKeys, inputs) # 1) The transaction has a complete set of signatures assert rawTxSigned['complete'] # 2) No script verification error occurred assert 'errors' not in rawTxSigned def script_verification_error_test(self): """Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script. Expected results: 3) The transaction has no complete set of signatures 4) Two script verification errors occurred 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error") 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)""" privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'] inputs = [ # Valid pay-to-pubkey script {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 0}, # Invalid script {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7, 'amount': '1.1'}, # Missing scriptPubKey {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1, 'amount': 2.0}, ] scripts = [ # Valid pay-to-pubkey script {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 0, 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, # Invalid script {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7, 'amount': '1.1', 'scriptPubKey': 'badbadbadbad'} ] outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) # Make sure decoderawtransaction is at least marginally sane decodedRawTx = self.nodes[0].decoderawtransaction(rawTx) for i, inp in enumerate(inputs): assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"]) assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"]) # Make sure decoderawtransaction throws if there is extra data assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00") rawTxSigned = self.nodes[0].signrawtransactionwithkey( rawTx, privKeys, scripts) # 3) The transaction has no complete set of signatures assert not rawTxSigned['complete'] # 4) Two script verification errors occurred assert 'errors' in rawTxSigned assert_equal(len(rawTxSigned['errors']), 2) # 5) Script verification errors have certain properties assert 'txid' in rawTxSigned['errors'][0] assert 'vout' in rawTxSigned['errors'][0] assert 'scriptSig' in rawTxSigned['errors'][0] assert 'sequence' in rawTxSigned['errors'][0] assert 'error' in rawTxSigned['errors'][0] # 6) The verification errors refer to the invalid (vin 1) and missing # input (vin 2) assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid']) 
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout']) assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid']) assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout']) def test_sighashes(self): """Creates and signs a raw transaction with various sighashes. Expected result: 1) The transaction is complete if the sighash is valid and has FORKID. 2) The RPC throws an error if the sighash does not contain FORKID. 3) The RPC throws an error if the sighash is invalid.""" privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'] inputs = [ # Valid pay-to-pubkey script {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 3.14159, 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'} ] outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) valid_sighashes = [ "ALL|FORKID", "NONE|FORKID", "SINGLE|FORKID", "ALL|FORKID|ANYONECANPAY", "NONE|FORKID|ANYONECANPAY", "SINGLE|FORKID|ANYONECANPAY" ] no_forkid_sighashes = [ "ALL", "NONE", "SINGLE", "ALL|ANYONECANPAY", "NONE|ANYONECANPAY", "SINGLE|ANYONECANPAY" ] invalid_sighashes = [ "", "ALL|SINGLE|FORKID", str(0), str(0x20) ] # 1) If the sighash is valid with FORKID, the signature is complete for sighash in valid_sighashes: rawTxSigned = self.nodes[0].signrawtransactionwithkey( rawTx, privKeys, inputs, sighash) assert 'complete' in rawTxSigned assert_equal(rawTxSigned['complete'], True) assert 'errors' not in rawTxSigned # 2) If FORKID is missing in the sighash, the RPC throws an error for sighash in no_forkid_sighashes: assert_raises_rpc_error(-8, "Signature must use SIGHASH_FORKID", self.nodes[0].signrawtransactionwithkey, rawTx, privKeys, inputs, sighash) # 3) If the sighash is invalid the RPC throws an error for sighash in invalid_sighashes: assert_raises_rpc_error(-8, "Invalid sighash param", self.nodes[0].signrawtransactionwithkey, rawTx, privKeys, inputs, sighash) def multiwallet_signing_test(self): """Creates and signs a raw transaction with a multiwallet node. 
Expected results: 1) The transaction is not signed if no wallet is specified 2) The transaction is signed if the correct wallet URI is given""" inputs = [ # Valid pay-to-pubkey scripts {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 3.14159, 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, ] outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} multiwallet_node = self.nodes[1] rawTx = multiwallet_node.createrawtransaction(inputs, outputs) # The multiwallet node cannot sign the transaction if no wallet is # specified assert_raises_rpc_error(-19, RPC_WALLET_NOT_SPECIFIED, multiwallet_node.signrawtransactionwithwallet, rawTx) # The multiwallet node can sign the transaction using w1 w1 = multiwallet_node.get_wallet_rpc('w1') w1.generate(101) utxo = w1.listunspent()[0] inputs = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] rawTx_w1 = w1.createrawtransaction(inputs, outputs) rawTxSigned_w1 = w1.signrawtransactionwithwallet(rawTx_w1) assert rawTxSigned_w1['complete'] assert 'errors' not in rawTxSigned_w1 def run_test(self): self.successful_signing_test() self.script_verification_error_test() self.test_sighashes() self.multiwallet_signing_test() if __name__ == '__main__': SignRawTransactionsTest().main() diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py index f7c9e4611..e86f91b1d 100755 --- a/test/functional/rpc_uptime.py +++ b/test/functional/rpc_uptime.py @@ -1,30 +1,30 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the RPC call related to the uptime command. Test corresponds to code in rpc/server.cpp. """ import time from test_framework.test_framework import BitcoinTestFramework class UptimeTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def run_test(self): self._test_uptime() def _test_uptime(self): wait_time = 10 self.nodes[0].setmocktime(int(time.time() + wait_time)) assert self.nodes[0].uptime() >= wait_time if __name__ == '__main__': UptimeTest().main() diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py index 0453a3a09..b769a10ea 100644 --- a/test/functional/test_framework/address.py +++ b/test/functional/test_framework/address.py @@ -1,66 +1,66 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
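#
# Overview of the scheme implemented below: base58check prepends a version
# byte to the payload, appends the first four bytes of a double-SHA256
# checksum, and re-encodes the result in a 58-character alphabet that omits
# the easily confused characters 0, O, I and l. An illustrative sketch of
# the usual call path (hypothetical input, not taken from any test):
#
#   keyhash = bytes(20)               # a zeroed HASH160, for demonstration only
#   addr = keyhash_to_p2pkh(keyhash)  # testnet version byte 111 by default
#   # addr starts with 'm' or 'n', the testnet/regtest P2PKH prefixes
#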
"""Encode and decode BASE58, P2PKH and P2SH addresses.""" from .script import CScript, hash160, hash256 from .util import hex_str_to_bytes chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' def byte_to_base58(b, version): result = '' str = b.hex() str = chr(version).encode('latin-1').hex() + str checksum = hash256(hex_str_to_bytes(str)).hex() str += checksum[:8] value = int('0x' + str, 0) while value > 0: result = chars[value % 58] + result value //= 58 while (str[:2] == '00'): result = chars[0] + result str = str[2:] return result # TODO: def base58_decode def keyhash_to_p2pkh(hash, main=False): assert (len(hash) == 20) version = 0 if main else 111 return byte_to_base58(hash, version) def scripthash_to_p2sh(hash, main=False): assert (len(hash) == 20) version = 5 if main else 196 return byte_to_base58(hash, version) def key_to_p2pkh(key, main=False): key = check_key(key) return keyhash_to_p2pkh(hash160(key), main) def script_to_p2sh(script, main=False): script = check_script(script) return scripthash_to_p2sh(hash160(script), main) def check_key(key): if (type(key) is str): key = hex_str_to_bytes(key) # Assuming this is hex string if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): return key assert False def check_script(script): if (type(script) is str): script = hex_str_to_bytes(script) # Assuming this is hex string if (type(script) is bytes or type(script) is CScript): return script assert False diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index c51ae829e..1bf508f10 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -1,1330 +1,1330 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik -# Copyright (c) 2010-2017 The Bitcoin Core developers +# Copyright (c) 2010-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Bitcoin test framework primitive and message structures CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: data structures that should map to corresponding structures in bitcoin/primitives msg_block, msg_tx, msg_headers, etc.: data structures that represent network messages ser_*, deser_*: functions that handle serialization/deserialization. Classes use __slots__ to ensure extraneous attributes aren't accidentally added by tests, compromising their intended effect. 
""" from codecs import encode import copy import hashlib from io import BytesIO import random import socket import struct import time from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes MIN_VERSION_SUPPORTED = 60001 # past bip-31 for ping/pong MY_VERSION = 70014 MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" # from version 70001 onwards, fRelay should be appended to version messages (BIP37) MY_RELAY = 1 MAX_INV_SZ = 50000 MAX_BLOCK_BASE_SIZE = 1000000 # 1 BCH in satoshis COIN = 100000000 NODE_NETWORK = (1 << 0) # NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) # NODE_WITNESS = (1 << 3) NODE_XTHIN = (1 << 4) NODE_BITCOIN_CASH = (1 << 5) NODE_NETWORK_LIMITED = (1 << 10) MSG_TX = 1 MSG_BLOCK = 2 MSG_TYPE_MASK = 0xffffffff >> 2 # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def ripemd160(s): return hashlib.new('ripemd160', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(l): r = b"" if l < 253: r = struct.pack("B", l) elif l < 0x10000: r = struct.pack(">= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v def deser_vector(f, c): nit = deser_compact_size(f) r = [] for i in range(nit): t = c() t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector. def ser_vector(l, ser_function_name=None): r = ser_compact_size(len(l)) for i in l: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(l): r = ser_compact_size(len(l)) for i in l: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(l): r = ser_compact_size(len(l)) for sv in l: r += ser_string(sv) return r # Deserialize from a hex string representation (eg from RPC) def FromHex(obj, hex_string): obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj # Convert a binary-serializable object to hex (eg for submission via RPC) def ToHex(obj): return obj.serialize().hex() # Objects that map to bitcoind objects, which can be serialized/deserialized class CAddress: __slots__ = ("ip", "nServices", "pchReserved", "port", "time") def __init__(self): self.time = 0 self.nServices = 1 self.pchReserved = b"\x00" * 10 + b"\xff" * 2 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f, with_time=True): if with_time: self.time = struct.unpack("H", f.read(2))[0] def serialize(self, with_time=True): r = b"" if with_time: r += struct.pack("H", self.port) return r def __repr__(self): return "CAddress(nServices={} ip={} port={})".format( self.nServices, self.ip, self.port) class CInv: __slots__ = ("hash", "type") typemap = { 0: "Error", 1: "TX", 2: "Block", 4: "CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack(" 21000000 * COIN: return False return True def __repr__(self): return "CTransaction(nVersion={} vin={} vout={} nLockTime={})".format( self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) class CBlockHeader: __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce", "nTime", "nVersion", "sha256") def __init__(self, header=None): if header is None: self.set_null() else: 
self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack(" 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return "CBlock(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x} vtx={})".format( self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) class PrefilledTransaction: __slots__ = ("index", "tx") def __init__(self, index=0, tx=None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self): r = b"" r += ser_compact_size(self.index) r += self.tx.serialize() return r def __repr__(self): return "PrefilledTransaction(index={}, tx={})".format( self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. class P2PHeaderAndShortIDs: __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length", "shortids", "shortids_length") def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack("= 106: self.addrFrom = CAddress() self.addrFrom.deserialize(f, False) self.nNonce = struct.unpack("= 209: self.nStartingHeight = struct.unpack("= 70001: # Relay field is optional for version 70001 onwards try: self.nRelay = struct.unpack(" class msg_headers: __slots__ = ("headers",) command = b"headers" def __init__(self, headers=None): self.headers = headers if headers is not None else [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return "msg_headers(headers={})".format(repr(self.headers)) class msg_reject: __slots__ = ("code", "data", "message", "reason") command = b"reject" REJECT_MALFORMED = 1 def __init__(self): self.message = b"" self.code = 0 self.reason = b"" self.data = 0 def deserialize(self, f): self.message = deser_string(f) self.code = struct.unpack(" 0: self.recvbuf += t while True: msg = self._on_data() if msg == None: break self.on_message(msg) def _on_data(self): """Try to read P2P messages from the recv buffer. 
This method reads data from the buffer in a loop. It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: with mininode_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != MAGIC_BYTES[self.network]: raise ValueError( "got garbage {}".format(repr(self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return None command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack(" 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin Cash node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" def __init__(self): super().__init__() # Track number of messages of each type received and the most recent # message of each type self.message_count = defaultdict(int) self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: # Send a version msg vt = msg_version() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 # Will be sent soon after connection_made self.on_connection_send_msg = vt return create_conn # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with mininode_lock: try: command = message.command.decode('ascii') self.message_count[command] += 1 self.last_message[command] = message getattr(self, 'on_' + command)(message) except: print("ERROR delivering {} ({})".format( repr(message), sys.exc_info()[0])) raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_getaddr(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_notfound(self, message): pass def on_pong(self, message): pass def on_reject(self, message): pass def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): self.verack_received = True def on_version(self, message): assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format( message.nVersion, MIN_VERSION_SUPPORTED) self.send_message(msg_verack()) self.nServices = message.nServices # Connection helper methods def wait_for_disconnect(self, timeout=60): def test_function(): return not self.is_connected wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message receiving helper methods def wait_for_block(self, blockhash, timeout=60): def test_function(): return self.last_message.get( "block") and self.last_message["block"].block.rehash() == blockhash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getdata(self, timeout=60): """Waits for a getdata message. Receiving any getdata message will satisfy the predicate. the last_message["getdata"] value must be explicitly cleared before calling this method, or this will return immediately with success. TODO: change this method to take a hash value and only return true if the correct block/tx has been requested.""" def test_function(): return self.last_message.get("getdata") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getheaders(self, timeout=60): """Waits for a getheaders message. Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"] value must be explicitly cleared before calling this method, or this will return immediately with success. TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" def test_function(): return self.last_message.get("getheaders") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( "wait_for_inv() will only verify the first inv object") def test_function(): return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_verack(self, timeout=60): def test_function(): return self.message_count["verack"] wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message sending helper functions def send_and_ping(self, message): self.send_message(message) self.sync_with_ping() # Sync up with the node def sync_with_ping(self, timeout=60): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): if not self.last_message.get("pong"): return False return self.last_message["pong"].nonce == self.ping_counter wait_until(test_function, timeout=timeout, lock=mininode_lock) self.ping_counter += 1 # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface. # This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. 
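# A typical pattern on the test-logic side looks like this (illustrative
# sketch; p2p_conn stands for whatever P2PInterface instance the test has
# attached to a node):
#
#   with mininode_lock:
#       # read shared state atomically while the network thread is locked out
#       last_pong = p2p_conn.last_message.get("pong")
#
# Because the lock is an RLock, a helper that already holds it may call
# another helper that acquires it again without deadlocking.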
mininode_lock = threading.RLock() class NetworkThread(threading.Thread): network_event_loop = None def __init__(self): super().__init__(name="NetworkThread") # There is only one event loop and no more than one thread must be created assert not self.network_event_loop NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): """Start the network thread.""" self.network_event_loop.run_forever() def close(self, timeout=10): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe( self.network_event_loop.stop) wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout) self.network_event_loop.close() self.join(timeout) class P2PDataStore(P2PInterface): """A P2P data store class. Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} self.last_block_hash = '' # store of txs. key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] def on_getdata(self, message): """Check for the tx/block in our stores and if found, reply with an inv message.""" for inv in message.inv: self.getdata_requests.append(inv.hash) if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: logger.debug( 'getdata message type {} received.'.format(hex(inv.type))) def on_getheaders(self, message): """Search back through our block store for the locator, and reply with a headers message if found.""" locator, hash_stop = message.locator, message.hashstop # Assume that the most recent block added is the tip if not self.block_store: return headers_list = [self.block_store[self.last_block_hash]] maxheaders = 2000 while headers_list[-1].sha256 not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: prev_block_header = CBlockHeader( self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug('block hash {} not found in block store'.format( hex(prev_block_hash))) break # Truncate the list if there are too many headers headers_list = headers_list[:-maxheaders - 1:-1] response = msg_headers(headers_list) if response is not None: self.send_message(response) def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True, reject_reason=None, expect_disconnect=False, timeout=60): """Send blocks to test node and test whether the tip advances. - add all blocks to our block_store - send a headers message for the final block - the on_getheaders handler will ensure that any getheaders are responded to - if request_block is True: wait for getdata for each of the blocks. 
The on_getdata handler will ensure that any getdata messages are responded to - if success is True: assert that the node's tip advances to the most recent block - if success is False: assert that the node's tip doesn't advance - if reject_reason is set: assert that the correct reject message is logged""" with mininode_lock: for block in blocks: self.block_store[block.sha256] = block self.last_block_hash = block.sha256 reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): self.send_message(msg_headers([CBlockHeader(blocks[-1])])) if request_block: wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() if success: wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) else: assert node.getbestblockhash() != blocks[-1].hash def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. - add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with mininode_lock: for tx in txs: self.tx_store[tx.sha256] = tx reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): for tx in txs: self.send_message(msg_tx(tx)) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: assert tx.hash in raw_mempool, "{} not found in mempool".format( tx.hash) else: # Check that none of the txs are now in the mempool for tx in txs: assert tx.hash not in raw_mempool, "{} tx found in mempool".format( tx.hash) diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py index e44acb6bc..e5fcad1a0 100644 --- a/test/functional/test_framework/netutil.py +++ b/test/functional/test_framework/netutil.py @@ -1,168 +1,168 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Linux network utilities. Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal """ import array from binascii import unhexlify import fcntl import os import socket import struct import sys # STATE_ESTABLISHED = '01' # STATE_SYN_SENT = '02' # STATE_SYN_RECV = '03' # STATE_FIN_WAIT1 = '04' # STATE_FIN_WAIT2 = '05' # STATE_TIME_WAIT = '06' # STATE_CLOSE = '07' # STATE_CLOSE_WAIT = '08' # STATE_LAST_ACK = '09' STATE_LISTEN = '0A' # STATE_CLOSING = '0B' def get_socket_inodes(pid): ''' Get list of socket inodes for process pid. 
''' base = '/proc/{}/fd'.format(pid) inodes = [] for item in os.listdir(base): target = os.readlink(os.path.join(base, item)) if target.startswith('socket:'): inodes.append(int(target[8:-1])) return inodes def _remove_empty(array): return [x for x in array if x != ''] def _convert_ip_port(array): host, port = array.split(':') # convert host from mangled-per-four-bytes form as used by kernel host = unhexlify(host) host_out = '' for x in range(0, len(host) // 4): (val,) = struct.unpack('=I', host[x * 4:(x + 1) * 4]) host_out += '{:08x}'.format(val) return host_out, int(port, 16) def netstat(typ='tcp'): ''' Return a list with the status of TCP connections on Linux systems. To get the pid of all networked processes running on the system, you must run this script as superuser. ''' with open('/proc/net/' + typ, 'r', encoding='utf8') as f: content = f.readlines() content.pop(0) result = [] for line in content: # Split lines and remove empty spaces. line_array = _remove_empty(line.split(' ')) tcp_id = line_array[0] l_addr = _convert_ip_port(line_array[1]) r_addr = _convert_ip_port(line_array[2]) state = line_array[3] # Need the inode to match with process pid. inode = int(line_array[9]) nline = [tcp_id, l_addr, r_addr, state, inode] result.append(nline) return result def get_bind_addrs(pid): ''' Get bind addresses as (host,port) tuples for process pid. ''' inodes = get_socket_inodes(pid) bind_addrs = [] for conn in netstat('tcp') + netstat('tcp6'): if conn[3] == STATE_LISTEN and conn[4] in inodes: bind_addrs.append(conn[1]) return bind_addrs # from: http://code.activestate.com/recipes/439093/ def all_interfaces(): ''' Return all interfaces that are up ''' is_64bits = sys.maxsize > 2**32 struct_size = 40 if is_64bits else 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) max_possible = 8 # initial value while True: bytes = max_possible * struct_size names = array.array('B', b'\0' * bytes) outbytes = struct.unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, # SIOCGIFCONF struct.pack('iL', bytes, names.buffer_info()[0]) ))[0] if outbytes == bytes: max_possible *= 2 else: break namestr = names.tobytes() return [(namestr[i:i + 16].split(b'\0', 1)[0], socket.inet_ntoa(namestr[i + 20:i + 24])) for i in range(0, outbytes, struct_size)] def addr_to_hex(addr): ''' Convert string IPv4 or IPv6 address to binary address as returned by get_bind_addrs. Very naive implementation that certainly doesn't work for all IPv6 variants. ''' if '.' in addr: # IPv4 addr = [int(x) for x in addr.split('.')] elif ':' in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') for i, comp in enumerate(addr): if comp == '': # skip empty component at beginning or end if i == 0 or i == (len(addr) - 1): continue x += 1 # :: skips to suffix assert x < 2 else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0) addr = sub[0] + ([0] * nullbytes) + sub[1] else: raise ValueError('Could not parse address {}'.format(addr)) return bytearray(addr).hex() def test_ipv6_local(): ''' Check for (local) IPv6 support. ''' import socket # By using SOCK_DGRAM this will not actually make a connection, but it will # fail if there is no route to IPv6 localhost.
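# A connected UDP socket can also report which source address the kernel
# chose, again without sending any traffic (illustrative sketch, not used
# by this function):
#
#   s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#   s.connect(('::1', 0))
#   s.getsockname()[0]   # -> '::1' when the loopback route exists
#   s.close()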
have_ipv6 = True try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) s.connect(('::1', 0)) except socket.error: have_ipv6 = False return have_ipv6 diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index c2621b9d4..fa820f4a1 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -1,724 +1,724 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Functionality to build scripts, as well as SignatureHash(). This file is modified from python-bitcoinlib. """ from .bignum import bn2vch import hashlib import struct from .messages import ( CTransaction, CTxOut, hash256, ser_string, ser_uint256, sha256, uint256_from_str, ) MAX_SCRIPT_ELEMENT_SIZE = 520 OPCODE_NAMES = {} def hash160(s): return hashlib.new('ripemd160', sha256(s)).digest() _opcode_instances = [] class CScriptOp(int): """A single script opcode""" __slots__ = () @staticmethod def encode_op_pushdata(d): """Encode a PUSHDATA op, returning bytes""" if len(d) < 0x4c: # OP_PUSHDATA return b'' + bytes([len(d)]) + d elif len(d) <= 0xff: # OP_PUSHDATA1 return b'\x4c' + bytes([len(d)]) + d elif len(d) <= 0xffff: return b'\x4d' + struct.pack(b'>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return bytes([len(r)]) + r class CScript(bytes): """Serialized script A bytes subclass, so you can use this directly whenever bytes are accepted. Note that this means that indexing does *not* work - you'll get an index by byte rather than opcode. This format was chosen for efficiency so that the general case would not require creating a lot of little CScriptOP objects. iter(script) however does iterate by opcode. """ __slots__ = () @classmethod def __coerce_instance(cls, other): # Coerce other into bytes if isinstance(other, CScriptOp): other = bytes([other]) elif isinstance(other, CScriptNum): if (other.value == 0): other = bytes([CScriptOp(OP_0)]) else: other = CScriptNum.encode(other) elif isinstance(other, int): if 0 <= other <= 16: other = bytes([CScriptOp.encode_op_n(other)]) elif other == -1: other = bytes([OP_1NEGATE]) else: other = CScriptOp.encode_op_pushdata(bn2vch(other)) elif isinstance(other, (bytes, bytearray)): other = CScriptOp.encode_op_pushdata(other) return other def __add__(self, other): # Do the coercion outside of the try block so that errors in it are # noticed. other = self.__coerce_instance(other) try: # bytes.__add__ always returns bytes instances unfortunately return CScript(super(CScript, self).__add__(other)) except TypeError: raise TypeError( 'Can not add a {!r} instance to a CScript'.format(other.__class__)) def join(self, iterable): # join makes no sense for a CScript() raise NotImplementedError def __new__(cls, value=b''): if isinstance(value, bytes) or isinstance(value, bytearray): return super(CScript, cls).__new__(cls, value) else: def coerce_iterable(iterable): for instance in iterable: yield cls.__coerce_instance(instance) # Annoyingly on both python2 and python3 bytes.join() always # returns a bytes instance even when subclassed. 
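# For instance (illustrative):
#
#   class B(bytes):
#       pass
#   assert type(B(b",").join([b"a", b"b"])) is bytes   # plain bytes, not B
#
# so the joined result has to be passed back through the subclass
# constructor, which is why it is wrapped in super().__new__ here.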
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) def raw_iter(self): """Raw iteration Yields tuples of (opcode, data, sop_idx) so that the different possible PUSHDATA encodings can be accurately distinguished, as well as determining the exact opcode byte indexes. (sop_idx) """ i = 0 while i < len(self): sop_idx = i opcode = self[i] i += 1 if opcode > OP_PUSHDATA4: yield (opcode, None, sop_idx) else: datasize = None pushdata_type = None if opcode < OP_PUSHDATA1: pushdata_type = 'PUSHDATA({})'.format(opcode) datasize = opcode elif opcode == OP_PUSHDATA1: pushdata_type = 'PUSHDATA1' if i >= len(self): raise CScriptInvalidError( 'PUSHDATA1: missing data length') datasize = self[i] i += 1 elif opcode == OP_PUSHDATA2: pushdata_type = 'PUSHDATA2' if i + 1 >= len(self): raise CScriptInvalidError( 'PUSHDATA2: missing data length') datasize = self[i] + (self[i + 1] << 8) i += 2 elif opcode == OP_PUSHDATA4: pushdata_type = 'PUSHDATA4' if i + 3 >= len(self): raise CScriptInvalidError( 'PUSHDATA4: missing data length') datasize = self[i] + (self[i + 1] << 8) + \ (self[i + 2] << 16) + (self[i + 3] << 24) i += 4 else: assert False # shouldn't happen data = bytes(self[i:i + datasize]) # Check for truncation if len(data) < datasize: raise CScriptTruncatedPushDataError( '{}: truncated data'.format(pushdata_type, data)) i += datasize yield (opcode, data, sop_idx) def __iter__(self): """'Cooked' iteration Returns either a CScriptOP instance, an integer, or bytes, as appropriate. See raw_iter() if you need to distinguish the different possible PUSHDATA encodings. """ for (opcode, data, sop_idx) in self.raw_iter(): if data is not None: yield data else: opcode = CScriptOp(opcode) if opcode.is_small_int(): yield opcode.decode_op_n() else: yield CScriptOp(opcode) def __repr__(self): def _repr(o): if isinstance(o, bytes): return "x('{}')".format(o.hex()) else: return repr(o) ops = [] i = iter(self) while True: op = None try: op = _repr(next(i)) except CScriptTruncatedPushDataError as err: op = '{}...'.format(_repr(err.data), err) break except CScriptInvalidError as err: op = ''.format(err) break except StopIteration: break finally: if op is not None: ops.append(op) return "CScript([{}])".format(', '.join(ops)) def GetSigOpCount(self, fAccurate): """Get the SigOp count. fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. Note that this is consensus-critical. """ n = 0 lastOpcode = OP_INVALIDOPCODE for (opcode, data, sop_idx) in self.raw_iter(): if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): n += opcode.decode_op_n() else: n += 20 lastOpcode = opcode return n SIGHASH_ALL = 1 SIGHASH_NONE = 2 SIGHASH_SINGLE = 3 SIGHASH_FORKID = 0x40 SIGHASH_ANYONECANPAY = 0x80 def FindAndDelete(script, sig): """Consensus critical, see FindAndDelete() in Satoshi codebase""" r = b'' last_sop_idx = sop_idx = 0 skip = True for (opcode, data, sop_idx) in script.raw_iter(): if not skip: r += script[last_sop_idx:sop_idx] last_sop_idx = sop_idx if script[sop_idx:sop_idx + len(sig)] == sig: skip = True else: skip = False if not skip: r += script[last_sop_idx:] return CScript(r) def SignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash Returns (hash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) """ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' if inIdx >= len(txTo.vin): return (HASH_ONE, "inIdx {} out of range ({})".format(inIdx, len(txTo.vin))) txtmp = CTransaction(txTo) for txin in txtmp.vin: txin.scriptSig = b'' txtmp.vin[inIdx].scriptSig = FindAndDelete( script, CScript([OP_CODESEPARATOR])) if (hashtype & 0x1f) == SIGHASH_NONE: txtmp.vout = [] for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 elif (hashtype & 0x1f) == SIGHASH_SINGLE: outIdx = inIdx if outIdx >= len(txtmp.vout): return (HASH_ONE, "outIdx {} out of range ({})".format(outIdx, len(txtmp.vout))) tmp = txtmp.vout[outIdx] txtmp.vout = [] for i in range(outIdx): txtmp.vout.append(CTxOut(-1)) txtmp.vout.append(tmp) for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 if hashtype & SIGHASH_ANYONECANPAY: tmp = txtmp.vin[inIdx] txtmp.vin = [] txtmp.vin.append(tmp) s = txtmp.serialize() s += struct.pack(b" 0: d = s.recv(n) if not d: raise IOError('Unexpected end of stream') rv.extend(d) n -= len(d) return rv # Implementation classes class Socks5Configuration(): """Proxy configuration.""" def __init__(self): self.addr = None # Bind address (must be set) self.af = socket.AF_INET # Bind address family self.unauth = False # Support unauthenticated self.auth = False # Support authentication class Socks5Command(): """Information about an incoming socks5 command.""" def __init__(self, cmd, atyp, addr, port, username, password): self.cmd = cmd # Command (one of Command.*) self.atyp = atyp # Address type (one of AddressType.*) self.addr = addr # Address self.port = port # Port to connect to self.username = username self.password = password def __repr__(self): return 'Socks5Command({},{},{},{},{},{})'.format( self.cmd, self.atyp, self.addr, self.port, self.username, self.password) class Socks5Connection(): def __init__(self, serv, conn, peer): self.serv = serv self.conn = conn self.peer = peer def handle(self): """Handle socks5 request according to RFC192.""" try: # Verify socks version ver = recvall(self.conn, 1)[0] if ver != 0x05: raise IOError('Invalid socks version {}'.format(ver)) # Choose authentication method nmethods = recvall(self.conn, 1)[0] methods = bytearray(recvall(self.conn, nmethods)) method = None if 0x02 in methods and self.serv.conf.auth: method = 0x02 # username/password elif 0x00 in methods and self.serv.conf.unauth: method = 0x00 # unauthenticated if method is None: raise IOError('No supported authentication method was offered') # Send response self.conn.sendall(bytearray([0x05, method])) # Read authentication (optional) username = None password = None if method == 0x02: ver = recvall(self.conn, 1)[0] if ver != 0x01: raise IOError('Invalid auth packet version {}'.format(ver)) ulen = recvall(self.conn, 1)[0] username = str(recvall(self.conn, ulen)) plen = recvall(self.conn, 1)[0] password = str(recvall(self.conn, plen)) # Send authentication response self.conn.sendall(bytearray([0x01, 0x00])) # Read connect request ver, cmd, _, atyp = recvall(self.conn, 4) if ver != 0x05: raise IOError( 'Invalid socks version {} in connect request'.format(ver)) if cmd != Command.CONNECT: raise IOError( 'Unhandled command {} in connect request'.format(cmd)) if atyp == AddressType.IPV4: addr = recvall(self.conn, 4) elif atyp == AddressType.DOMAINNAME: n = recvall(self.conn, 1)[0] addr = recvall(self.conn, n) elif atyp == AddressType.IPV6: addr = 
recvall(self.conn, 16) else: raise IOError('Unknown address type {}'.format(atyp)) port_hi, port_lo = recvall(self.conn, 2) port = (port_hi << 8) | port_lo # Send dummy response self.conn.sendall( bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) logger.info('Proxy: {}'.format(cmdin)) # Fall through to disconnect except Exception as e: logger.exception("socks5 request handling failed.") self.serv.queue.put(e) finally: self.conn.close() class Socks5Server(): def __init__(self, conf): self.conf = conf self.s = socket.socket(conf.af) self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.s.bind(conf.addr) self.s.listen(5) self.running = False self.thread = None self.queue = queue.Queue() # report connections and exceptions to client def run(self): while self.running: (sockconn, peer) = self.s.accept() if self.running: conn = Socks5Connection(self, sockconn, peer) thread = threading.Thread(None, conn.handle) thread.daemon = True thread.start() def start(self): assert not self.running self.running = True self.thread = threading.Thread(None, self.run) self.thread.daemon = True self.thread.start() def stop(self): self.running = False # connect to self to end run loop s = socket.socket(self.conf.af) s.connect(self.conf.addr) s.close() self.thread.join() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index bff941258..d3f660da1 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -1,477 +1,477 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" import argparse import configparser from enum import Enum import logging import os import pdb import shutil import sys import tempfile import time from .authproxy import JSONRPCException from . import coverage from .test_node import TestNode from .mininode import NetworkThread from .util import ( assert_equal, check_json_precision, connect_nodes_bi, disconnect_nodes, get_datadir_path, initialize_datadir, MAX_NODES, p2p_port, PortSeed, rpc_port, set_node_times, sync_blocks, sync_mempools, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 # Timestamp is 01.01.2019 TIMESTAMP_IN_THE_PAST = 1546300800 class BitcoinTestFramework(): """Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. Individual tests can also override the following methods to customize the test setup: - add_options() - setup_chain() - setup_network() - setup_nodes() The __init__() and main() methods should not be overridden. This class also contains various public and private helper methods.""" def __init__(self): """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" self.setup_clean_chain = False self.nodes = [] self.network_thread = None self.mocktime = 0 self.supports_cli = False self.bind_to_localhost_only = True def main(self): """Main function. 
This should not be overridden by the subclass test scripts.""" parser = argparse.ArgumentParser(usage="%(prog)s [options]") parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs (default: %(default)s)") parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs") parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int, help="The seed to use for assigning port numbers (default: current process id)") parser.add_argument("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath( __file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)") parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", help="use bitcoin-cli instead of RPC for all commands") parser.add_argument("--with-gravitonactivation", dest="gravitonactivation", default=False, action="store_true", help="Activate graviton update on timestamp {}".format(TIMESTAMP_IN_THE_PAST)) self.add_options(parser) self.options = parser.parse_args() self.set_test_params() assert hasattr( self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" PortSeed.n = self.options.port_seed check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) config = configparser.ConfigParser() config.read_file(open(self.options.configfile, encoding='utf-8')) self.options.bitcoind = os.getenv( "BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/bitcoind' + config["environment"]["EXEEXT"]) self.options.bitcoincli = os.getenv( "BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/bitcoin-cli' + config["environment"]["EXEEXT"]) os.environ['PATH'] = config['environment']['BUILDDIR'] + os.pathsep + \ config['environment']['BUILDDIR'] + os.path.sep + "qt" + os.pathsep + \ os.environ['PATH'] # Set up temp directory and start logging if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix="test") self._start_logging() self.log.debug('Setting up network thread') self.network_thread = NetworkThread() self.network_thread.start() success = TestStatus.FAILED try: if self.options.usecli and not self.supports_cli: raise SkipTest( "--usecli specified but test does not support using CLI") 
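# The three calls below drive the standard lifecycle: setup_chain()
# prepares the data directories, setup_network() starts and connects the
# nodes, and run_test() executes the subclass's logic. A minimal subclass
# only has to supply the two required hooks (illustrative sketch; MyTest
# is hypothetical):
#
#   class MyTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 1
#       def run_test(self):
#           # the cached default chain is 200 blocks long
#           assert self.nodes[0].getblockcount() == 200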
self.setup_chain() self.setup_network() self.run_test() success = TestStatus.PASSED except JSONRPCException: self.log.exception("JSONRPC error") except SkipTest as e: self.log.warning("Test Skipped: {}".format(e.message)) success = TestStatus.SKIPPED except AssertionError: self.log.exception("Assertion failed") except KeyError: self.log.exception("Key error") except Exception: self.log.exception("Unexpected exception caught during testing") except KeyboardInterrupt: self.log.warning("Exiting after keyboard interrupt") if success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? for help") pdb.set_trace() self.log.debug('Closing down network thread') self.network_thread.close() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: for node in self.nodes: node.cleanup_on_exit = False self.log.info( "Note: bitcoinds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED: self.log.info("Cleaning up {} on exit".format(self.options.tmpdir)) cleanup_tree_on_exit = True else: self.log.warning( "Not cleaning up dir {}".format(self.options.tmpdir)) cleanup_tree_on_exit = False if success == TestStatus.PASSED: self.log.info("Tests successful") exit_code = TEST_EXIT_PASSED elif success == TestStatus.SKIPPED: self.log.info("Test skipped") exit_code = TEST_EXIT_SKIPPED else: self.log.error( "Test failed. Test logging available at {}/test_framework.log".format(self.options.tmpdir)) self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath( os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir)) exit_code = TEST_EXIT_FAILED logging.shutdown() if cleanup_tree_on_exit: shutil.rmtree(self.options.tmpdir) sys.exit(exit_code) # Methods to override in subclass test scripts. def set_test_params(self): """Tests must override this method to change default values for number of nodes, topology, etc""" raise NotImplementedError def add_options(self, parser): """Override this method to add command-line options to the test""" pass def setup_chain(self): """Override this method to customize blockchain setup""" self.log.info("Initializing test directory " + self.options.tmpdir) if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain() def setup_network(self): """Override this method to customize test network topology""" self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. for i in range(self.num_nodes - 1): connect_nodes_bi(self.nodes[i], self.nodes[i + 1]) self.sync_all() def setup_nodes(self): """Override this method to customize test node setup""" extra_args = None if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() def run_test(self): """Tests must override this method to define test logic""" raise NotImplementedError
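# As a concrete illustration, a test that wants two deliberately
# disconnected nodes could override the hooks above like this (sketch
# only; TwoIslandsTest is hypothetical):
#
#   class TwoIslandsTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.setup_clean_chain = True
#           self.num_nodes = 2
#
#       def setup_network(self):
#           # start the nodes but skip the default chain connections
#           self.setup_nodes()
#
#       def run_test(self):
#           # each node mines on its own island
#           self.nodes[0].generate(1)
#           self.nodes[1].generate(1)
#           assert self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()
#
# Public helper methods. These can be accessed by the subclass test scripts.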
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None): """Instantiate TestNode objects""" if self.bind_to_localhost_only: extra_confs = [["bind=127.0.0.1"]] * num_nodes else: extra_confs = [[]] * num_nodes if extra_args is None: extra_args = [[]] * num_nodes if binary is None: binary = [self.options.bitcoind] * num_nodes assert_equal(len(extra_confs), num_nodes) assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): self.nodes.append(TestNode(i, get_datadir_path(self.options.tmpdir, i), host=rpchost, rpc_port=rpc_port(i), p2p_port=p2p_port(i), timewait=timewait, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli)) if self.options.gravitonactivation: self.nodes[i].extend_default_args( ["-gravitonactivationtime={}".format(TIMESTAMP_IN_THE_PAST)]) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple bitcoinds""" if extra_args is None: extra_args = [None] * self.num_nodes assert_equal(len(extra_args), self.num_nodes) try: for i, node in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except: # If one node failed to start, stop the others self.stop_nodes() raise if self.options.coveragedir is not None: for node in self.nodes: coverage.write_all_rpc_commands( self.options.coveragedir, node.rpc) def stop_node(self, i, expected_stderr=''): """Stop a bitcoind test node""" self.nodes[i].stop_node(expected_stderr) self.nodes[i].wait_until_stopped() def stop_nodes(self): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node() for node in self.nodes: # Wait for nodes to stop node.wait_until_stopped() def restart_node(self, i, extra_args=None): """Stop and start a test node""" self.stop_node(i) self.start_node(i, extra_args) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ disconnect_nodes(self.nodes[1], self.nodes[2]) disconnect_nodes(self.nodes[2], self.nodes[1]) self.sync_all([self.nodes[:2], self.nodes[2:]]) def join_network(self): """ Join the (previously split) network halves together. """ connect_nodes_bi(self.nodes[1], self.nodes[2]) self.sync_all() def sync_all(self, node_groups=None): if not node_groups: node_groups = [self.nodes] for group in node_groups: sync_blocks(group) sync_mempools(group) # Private helper methods. These should not be accessed by the subclass test scripts. def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler( self.options.tmpdir + '/test_framework.log', encoding='utf-8') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stdout. By default this # logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # The user can provide the log level as a number or a string (eg DEBUG);
# if it was passed as a string, try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) formatter = logging.Formatter( fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self): """Initialize a pre-mined blockchain for use by the test. Create a cache of a 200-block-long chain (with wallet) for MAX_NODES. Afterward, create num_nodes copies from the cache.""" assert self.num_nodes <= MAX_NODES create_cache = False for i in range(MAX_NODES): if not os.path.isdir(get_datadir_path(self.options.cachedir, i)): create_cache = True break if create_cache: self.log.debug("Creating data directories from cached datadir") # find and delete old cache directories if any exist for i in range(MAX_NODES): if os.path.isdir(get_datadir_path(self.options.cachedir, i)): shutil.rmtree(get_datadir_path(self.options.cachedir, i)) # Create cache directories, run bitcoinds: for i in range(MAX_NODES): datadir = initialize_datadir(self.options.cachedir, i) self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), extra_conf=["bind=127.0.0.1"], extra_args=[], host=None, rpc_port=rpc_port(i), p2p_port=p2p_port(i), timewait=None, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=None)) self.nodes[i].clear_default_args() self.nodes[i].extend_default_args(["-datadir=" + datadir]) if i > 0: self.nodes[i].extend_default_args( ["-connect=127.0.0.1:" + str(p2p_port(0))]) if self.options.gravitonactivation: self.nodes[i].extend_default_args( ["-gravitonactivationtime={}".format(TIMESTAMP_IN_THE_PAST)]) self.start_node(i) # Wait for RPC connections to be ready for node in self.nodes: node.wait_for_rpc_connection() # For backward compatibility of the python scripts with previous # versions of the cache, set mocktime to Jan 1, # 2014 + (201 * 10 * 60) self.mocktime = 1388534400 + (201 * 10 * 60) # Create a 200-block-long chain; each of the 4 first nodes # gets 25 mature blocks and 25 immature. # Note: To preserve compatibility with older versions of # initialize_chain, only 4 nodes will generate coins.
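# A worked example of the timestamp arithmetic used here: mocktime is
# 1388534400 + (201 * 10 * 60), and the initial block_time below is
# mocktime - (201 * 10 * 60) = 1388534400, i.e. Jan 1 2014 00:00:00 UTC.
# The nested loops mine 2 * 4 * 25 = 200 blocks, advancing block_time by
# 600 seconds per block, so block i carries timestamp 1388534400 + i * 600
# for i in 0..199.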
# # blocks are created with timestamps 10 minutes apart # starting from 2010 minutes in the past block_time = self.mocktime - (201 * 10 * 60) for i in range(2): for peer in range(4): for j in range(25): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 10 * 60 # Must sync before next peer starts generating blocks sync_blocks(self.nodes) # Shut them down, and clean up cache directories: self.stop_nodes() self.nodes = [] self.mocktime = 0 def cache_path(n, *paths): return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths) for i in range(MAX_NODES): for entry in os.listdir(cache_path(i)): if entry not in ['wallets', 'chainstate', 'blocks']: os.remove(cache_path(i, entry)) for i in range(self.num_nodes): from_dir = get_datadir_path(self.options.cachedir, i) to_dir = get_datadir_path(self.options.tmpdir, i) shutil.copytree(from_dir, to_dir) # Overwrite port/rpcport in bitcoin.conf initialize_datadir(self.options.tmpdir, i) def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): initialize_datadir(self.options.tmpdir, i) class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 54836222f..5d731c68f 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -1,462 +1,462 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import contextlib import decimal from enum import Enum import errno import http.client import json import logging import os import re import subprocess import sys import tempfile import time from .authproxy import JSONRPCException from .messages import COIN, CTransaction, FromHex from .util import ( append_config, delete_cookie_file, get_rpc_proxy, p2p_port, rpc_url, wait_until, ) # For Python 3.4 compatibility JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOIND_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 class TestNode(): """A class for representing a bitcoind node under test. 
This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, datadir, host, rpc_port, p2p_port, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False): self.index = i self.datadir = datadir self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.host = host self.rpc_port = rpc_port self.p2p_port = p2p_port self.name = "testnode-{}".format(i) if timewait: self.rpc_timeout = timewait else: # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 self.binary = bitcoind if not os.path.isfile(self.binary): raise FileNotFoundError( "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOIND= {}".format(self.binary, sys.argv[0])) self.coverage_dir = coverage_dir if extra_conf is not None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the default list # below. # For those callers that need more flexibility, they can access the # default args using the provided facilities. # Note that common args are set in the config file (see # initialize_datadir) self.extra_args = extra_args self.default_args = ["-datadir=" + self.datadir, "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=" + self.name] if not os.path.isfile(bitcoin_cli): raise FileNotFoundError( "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOINCLI= {}".format(bitcoin_cli, sys.argv[0])) self.cli = TestNodeCLI(bitcoin_cli, self.datadir) self.use_cli = use_cli self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.relay_fee_cache = None self.log = logging.getLogger('TestFramework.node{}'.format(i)) # Whether to kill the node when this object goes away self.cleanup_on_exit = True self.p2ps = [] def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return "[node {}] {}".format(self.index, msg) def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shut down when # this destructor is called.
print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr(self.cli, name) else: assert self.rpc is not None, self._node_msg( "Error: RPC not initialized") assert self.rpc_connected, self._node_msg( "Error: No RPC connection") return getattr(self.rpc, name) def clear_default_args(self): self.default_args.clear() def extend_default_args(self, args): self.default_args.extend(args) def remove_default_args(self, args): for rm_arg in args: # Remove all occurrences of rm_arg in self.default_args: # - if the arg is a flag (-flag), then the names must match # - if the arg is a value (-key=value) then the name must starts # with "-key=" (the '"' char is to avoid removing "-key_suffix" # arg is "-key" is the argument to remove). self.default_args = [def_arg for def_arg in self.default_args if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] def start(self, extra_args=None, stdout=None, stderr=None, *args, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time bitcoind is started if stderr is None: stderr = tempfile.NamedTemporaryFile( dir=self.stderr_dir, delete=False) if stdout is None: stdout = tempfile.NamedTemporaryFile( dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by bitcoind, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") self.process = subprocess.Popen( [self.binary] + self.default_args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, *args, **kwargs) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: raise FailedToStartError(self._node_msg( 'bitcoind exited with status {} during initialization'.format(self.process.returncode))) try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.host, self.rpc_port), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.rpc_connected = True self.url = self.rpc.url self.log.debug("RPC successfully started") return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcassword. 
bitcoind still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error("Unable to connect to bitcoind") def get_wallet_rpc(self, wallet_name): if self.use_cli: return self.cli("-rpcwallet={}".format(wallet_name)) else: assert self.rpc is not None, self._node_msg( "Error: RPC not initialized") assert self.rpc_connected, self._node_msg( "Error: RPC not connected") wallet_path = "wallet/{}".format(wallet_name) return self.rpc / wallet_path def stop_node(self, expected_stderr=''): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # Check that stderr is as expected self.stderr.seek(0) stderr = self.stderr.read().decode('utf-8').strip() if stderr != expected_stderr: raise AssertionError( "Unexpected stderr {} != {}".format(stderr, expected_stderr)) del self.p2ps[:] def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( "Node returned non-zero exit code ({}) when stopping".format(return_code)) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, timeout=timeout) @contextlib.contextmanager def assert_debug_log(self, expected_msgs): debug_log = os.path.join(self.datadir, 'regtest', 'debug.log') with open(debug_log, encoding='utf-8') as dl: dl.seek(0, 2) prev_size = dl.tell() try: yield finally: with open(debug_log, encoding='utf-8') as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for expected_msg in expected_msgs: if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: self._raise_assertion_error( 'Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log)) def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): """Attempt to start the node and expect it to raise an error. extra_args: extra arguments to pass through to bitcoind expected_msg: regex that stderr should match when bitcoind fails Will throw if bitcoind starts without an error. 
Will throw if an expected_msg is provided and it does not match bitcoind's stderr.""" with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: try: self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) self.wait_for_rpc_connection() self.stop_node() self.wait_until_stopped() except FailedToStartError as e: self.log.debug('bitcoind failed to start: {}'.format(e)) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8').strip() if match == ErrorMatch.PARTIAL_REGEX: if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) else: if expected_msg is None: assert_msg = "bitcoind should have exited with an error" else: assert_msg = "bitcoind should have exited with expected error " + expected_msg self._raise_assertion_error(assert_msg) def node_encrypt_wallet(self, passphrase): """Encrypts the wallet. This causes bitcoind to shut down, so this method takes care of cleaning up resources.""" self.encryptwallet(passphrase) self.wait_until_stopped() def relay_fee(self, cached=True): if not self.relay_fee_cache or not cached: self.relay_fee_cache = self.getnetworkinfo()["relayfee"] return self.relay_fee_cache def calculate_fee(self, tx): # The relay fee is in BCH per kB; divide by 1000 for the per-byte rate, # and multiply by COIN to convert the result back to satoshis. billable_size_estimate = tx.billable_size() # Add some padding for signatures # NOTE: Fees must be calculated before signatures are added, # so they will never be included in the billable_size above. billable_size_estimate += len(tx.vin) * 81 return int(self.relay_fee() / 1000 * billable_size_estimate * COIN) def calculate_fee_from_txid(self, txid): ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) return self.calculate_fee(ctx) def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): """Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect(**kwargs)() self.p2ps.append(p2p_conn) if wait_for_verack: p2p_conn.wait_for_verack() return p2p_conn @property def p2p(self): """Return the first p2p connection Convenience property - most tests only use a single p2p connection to each node, so this saves having to write node.p2ps[0] many times.""" assert self.p2ps, self._node_msg("No p2p connection") return self.p2ps[0] def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir): self.options = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoin-cli command-line options cli = TestNodeCLI(self.binary, self.datadir) cli.options = [str(o) for o in options] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command=None, *args, **kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [str(arg) for arg in args] named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()] assert not ( pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug("Running bitcoin-cli command: {}".format(command)) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match( r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError( returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except JSONDecodeError: return cli_stdout.rstrip("\n") diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 5f25c21d0..5195a3cee 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -1,648 +1,648 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Helpful routines for regression testing.""" from base64 import b64encode from binascii import unhexlify from decimal import Decimal, ROUND_DOWN import hashlib import inspect import json import logging import os import random import re from subprocess import CalledProcessError import time from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException logger = logging.getLogger("TestFramework.utils") # Assert functions ################## def assert_fee_amount(fee, tx_size, fee_per_kB, wiggleroom=2): """ Assert the fee was in range wiggleroom defines an amount that the test expects the wallet to be off by when estimating fees. This can be due to the dummy signature that is added during fee calculation, or due to the wallet funding transactions using the ceiling of the calculated fee. """ target_fee = round(tx_size * fee_per_kB / 1000, 8) if fee < (tx_size - wiggleroom) * fee_per_kB / 1000: raise AssertionError( "Fee of {} BCH too low! (Should be {} BCH)".format(str(fee), str(target_fee))) if fee > (tx_size + wiggleroom) * fee_per_kB / 1000: raise AssertionError( "Fee of {} BCH too high! (Should be {} BCH)".format(str(fee), str(target_fee))) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError("not({})".format(" == ".join(str(arg) for arg in (thing1, thing2) + args))) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("{} <= {}".format(str(thing1), str(thing2))) def assert_greater_than_or_equal(thing1, thing2): if thing1 < thing2: raise AssertionError("{} < {}".format(str(thing1), str(thing2))) def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) def assert_raises_message(exc, message, fun, *args, **kwds): try: fun(*args, **kwds) except JSONRPCException: raise AssertionError( "Use assert_raises_rpc_error() to test RPC failures") except exc as e: if message is not None and message not in e.error['message']: raise AssertionError( "Expected substring not found:" + e.error['message']) except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_raises_process_error(returncode, output, fun, *args, **kwds): """Execute a process and asserts the process return code and output. Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError and verifies that the return code and output are as expected. Throws AssertionError if no CalledProcessError was raised or if the return code and output are not as expected. Args: returncode (int): the process return code. output (string): [a substring of] the process output. fun (function): the function to call. This should execute a process. args*: positional arguments for the function. kwds**: named arguments for the function. """ try: fun(*args, **kwds) except CalledProcessError as e: if returncode != e.returncode: raise AssertionError( "Unexpected returncode {}".format(e.returncode)) if output not in e.output: raise AssertionError("Expected substring not found:" + e.output) else: raise AssertionError("No exception raised") def assert_raises_rpc_error(code, message, fun, *args, **kwds): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException and verifies that the error code and message are as expected. 
Throws AssertionError if no JSONRPCException was raised or if the error code/message are not as expected. Args: code (int), optional: the error code returned by the RPC call (defined in src/rpc/protocol.h). Set to None if checking the error code is not required. message (string), optional: [a substring of] the error string returned by the RPC call. Set to None if checking the error string is not required. fun (function): the function to call. This should be the name of an RPC. args*: positional arguments for the function. kwds**: named arguments for the function. """ assert try_rpc(code, message, fun, *args, **kwds), "No exception raised" def try_rpc(code, message, fun, *args, **kwds): """Tries to run an rpc command. Test against error code and message if the rpc fails. Returns whether a JSONRPCException was raised.""" try: fun(*args, **kwds) except JSONRPCException as e: # JSONRPCException was thrown as expected. Check the code and message values are correct. if (code is not None) and (code != e.error["code"]): raise AssertionError( "Unexpected JSONRPC error code {}".format(e.error["code"])) if (message is not None) and (message not in e.error['message']): raise AssertionError( "Expected substring not found:" + e.error['message']) return True except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: return False def assert_is_hex_string(string): try: int(string, 16) except Exception as e: raise AssertionError( "Couldn't interpret {!r} as hexadecimal; raised: {}".format(string, e)) def assert_is_hash_string(string, length=64): if not isinstance(string, str): raise AssertionError( "Expected a string, got type {!r}".format(type(string))) elif length and len(string) != length: raise AssertionError( "String of length {} expected; got {}".format(length, len(string))) elif not re.match('[abcdef0-9]+$', string): raise AssertionError( "String {!r} contains invalid characters for a hash.".format(string)) def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. 
If the should_not_find flag is true, to_match should not be found in object_array """ if should_not_find: assert_equal(expected, {}) num_matched = 0 for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue elif should_not_find: num_matched = num_matched + 1 for key, value in expected.items(): if item[key] != value: raise AssertionError("{} : expected {}={}".format( str(item), str(key), str(value))) num_matched = num_matched + 1 if num_matched == 0 and not should_not_find: raise AssertionError("No objects matched {}".format(str(to_match))) if num_matched > 0 and should_not_find: raise AssertionError("Objects were found {}".format(str(to_match))) # Utility functions ################### def check_json_precision(): """Make sure json library being used does not lose precision converting BCH values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) def b_2_x(byte_str): return byte_str.hex() def hash256(byte_str): sha256 = hashlib.sha256() sha256.update(byte_str) sha256d = hashlib.sha256() sha256d.update(sha256.digest()) return sha256d.digest()[::-1] def hex_str_to_bytes(hex_str): return unhexlify(hex_str.encode('ascii')) def str_to_b64str(string): return b64encode(string.encode('utf-8')).decode('ascii') def satoshi_round(amount): return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None): if attempts == float('inf') and timeout == float('inf'): timeout = 60 attempt = 0 time_end = time.time() + timeout while attempt < attempts and time.time() < time_end: if lock: with lock: if predicate(): return else: if predicate(): return attempt += 1 time.sleep(0.05) # Print the cause of the timeout predicate_source = inspect.getsourcelines(predicate) logger.error("wait_until() failed. Predicate: {}".format(predicate_source)) if attempt >= attempts: raise AssertionError("Predicate {} not true after {} attempts".format( predicate_source, attempts)) elif time.time() >= time_end: raise AssertionError( "Predicate {} not true after {} seconds".format(predicate_source, timeout)) raise RuntimeError('Unreachable') # RPC/P2P connection constants and functions ############################################ # The maximum number of nodes a single test can spawn MAX_NODES = 8 # Don't assign rpc or p2p ports lower than this PORT_MIN = 11000 # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 class PortSeed: # Must be initialized with a unique integer for each process n = None def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None): """ Args: url (str): URL of the RPC server to call node_number (int): the node number (or id) that this calls to Kwargs: timeout (int): HTTP timeout in seconds Returns: AuthServiceProxy. convenience object for making RPC calls. 
""" proxy_kwargs = {} if timeout is not None: proxy_kwargs['timeout'] = timeout proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info coverage_logfile = coverage.get_filename( coveragedir, node_number) if coveragedir else None return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) def p2p_port(n): assert n <= MAX_NODES return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_port(n): return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_url(datadir, host, port): rpc_u, rpc_p = get_auth_cookie(datadir) if host == None: host = '127.0.0.1' return "http://{}:{}@{}:{}".format(rpc_u, rpc_p, host, int(port)) # Node functions ################ def initialize_datadir(dirname, n): datadir = get_datadir_path(dirname, n) if not os.path.isdir(datadir): os.makedirs(datadir) with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: f.write("regtest=1\n") f.write("[regtest]\n") f.write("port=" + str(p2p_port(n)) + "\n") f.write("rpcport=" + str(rpc_port(n)) + "\n") f.write("server=1\n") f.write("keypool=1\n") f.write("discover=0\n") f.write("listenonion=0\n") f.write("usecashaddr=1\n") os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True) os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True) return datadir def get_datadir_path(dirname, n): return os.path.join(dirname, "node" + str(n)) def append_config(datadir, options): with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f: for option in options: f.write(option + "\n") def get_auth_cookie(datadir): user = None password = None if os.path.isfile(os.path.join(datadir, "bitcoin.conf")): with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f: for line in f: if line.startswith("rpcuser="): assert user is None # Ensure that there is only one rpcuser line user = line.split("=")[1].strip("\n") if line.startswith("rpcpassword="): assert password is None # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f: userpass = f.read() split_userpass = userpass.split(':') user = split_userpass[0] password = split_userpass[1] if user is None or password is None: raise ValueError("No RPC credentials") return user, password # If a cookie file exists in the given datadir, delete it. def delete_cookie_file(datadir): if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): logger.debug("Deleting leftover cookie file") os.remove(os.path.join(datadir, "regtest", ".cookie")) def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def disconnect_nodes(from_node, to_node): for peer_id in [peer['id'] for peer in from_node.getpeerinfo() if to_node.name in peer['subver']]: try: from_node.disconnectnode(nodeid=peer_id) except JSONRPCException as e: # If this node is disconnected between calculating the peer id # and issuing the disconnect, don't worry about it. # This avoids a race condition if we're mass-disconnecting peers. 
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED raise # wait to disconnect wait_until(lambda: [peer['id'] for peer in from_node.getpeerinfo() if to_node.name in peer['subver']] == [], timeout=5) def connect_nodes(from_node, to_node): host = to_node.host if host is None: host = '127.0.0.1' ip_port = host + ':' + str(to_node.p2p_port) from_node.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying wait_until(lambda: all(peer['version'] != 0 for peer in from_node.getpeerinfo())) def connect_nodes_bi(a, b): connect_nodes(a, b) connect_nodes(b, a) def sync_blocks(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has at least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced. """ stop_time = time.time() + timeout while time.time() <= stop_time: best_hash = [x.getbestblockhash() for x in rpc_connections] if best_hash.count(best_hash[0]) == len(rpc_connections): return time.sleep(wait) raise AssertionError("Block sync timed out:{}".format( "".join("\n {!r}".format(b) for b in best_hash))) def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True): """ Wait until everybody has the same transactions in their memory pools """ stop_time = time.time() + timeout while time.time() <= stop_time: pool = [set(r.getrawmempool()) for r in rpc_connections] if pool.count(pool[0]) == len(rpc_connections): if flush_scheduler: for r in rpc_connections: r.syncwithvalidationinterfacequeue() return time.sleep(wait) raise AssertionError("Mempool sync timed out:{}".format( "".join("\n {!r}".format(m) for m in pool))) # Transaction/Block functions ############################# def find_output(node, txid, amount): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid {} : {} not found".format( txid, str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert confirmations_required >= 0 utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append( {"txid": t["txid"], "vout": t["vout"], "address": t["address"]}) if total_in < amount_needed: raise RuntimeError("Insufficient funds: need {}, have {}".format( amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out + fee change = amount_in - amount if change > amount * 2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal( change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create & broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using its output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount + fee * 2) outputs = make_change(from_node, total_in, amount + fee, fee) outputs[self_address] = float(amount + fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransactionwithwallet(self_rawtx) self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount + fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [{"txid": self_txid, "vout": vout}] outputs = {to_node.getnewaddress(): float(amount)} rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransactionwithwallet(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (total_in, inputs) = gather_inputs(from_node, amount + fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransactionwithwallet(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). def gen_return_txouts(): # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSHDATA2 512 bytes for i in range(512): script_pubkey = script_pubkey + "01" # concatenate 128 txouts of above script_pubkey which we'll insert before # the txout for change txouts = "81" for k in range(128): # add txout value txouts = txouts + "0000000000000000" # add length of script_pubkey txouts = txouts + "fd0402" # add script_pubkey txouts = txouts + script_pubkey return txouts def create_tx(node, coinbase, to_address, amount): inputs = [{"txid": coinbase, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransactionwithwallet(rawtx) assert_equal(signresult["complete"], True) return signresult["hex"] # Create a spend of each passed-in utxo, splicing in "txouts" to each raw # transaction to make it large. See gen_return_txouts() above.
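# A worked example of the splice arithmetic, under the encoding used above
# (illustrative only): each spliced scriptPubKey is the 4-byte prefix
# "6a4d0200" (OP_RETURN plus an OP_PUSHDATA2 header) followed by 512 "01"
# bytes, so its serialized length is 4 + 512 = 516 = 0x0204, matching the
# "fd0402" compact size (0xfd marker plus little-endian 516) written before
# each script. The leading "81" is compact size 129: the 128 spliced txouts
# plus the one change txout kept from the original raw transaction.
assert 4 + 512 == 0x0204  # script length encoded as "fd0402" above
assert 128 + 1 == 0x81    # output count encoded as "81" above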
def create_lots_of_big_transactions(node, txouts, utxos, num, fee): addr = node.getnewaddress() txids = [] for _ in range(num): t = utxos.pop() inputs = [{"txid": t["txid"], "vout": t["vout"]}] outputs = {} change = t['amount'] - fee outputs[addr] = satoshi_round(change) rawtx = node.createrawtransaction(inputs, outputs) newtx = rawtx[0:92] newtx = newtx + txouts newtx = newtx + rawtx[94:] signresult = node.signrawtransactionwithwallet( newtx, None, "NONE|FORKID") txid = node.sendrawtransaction(signresult["hex"], True) txids.append(txid) return txids def find_vout_for_address(node, txid, addr): """ Locate the vout index of the given transaction sending to the given address. Raises a RuntimeError if not found. """ tx = node.getrawtransaction(txid, True) for i in range(len(tx["vout"])): if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]): return i raise RuntimeError( "Vout not found for address: txid={}, addr={}".format(txid, addr)) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index a490e591e..8c77d8121 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -1,761 +1,761 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. Functional tests are disabled on Windows by default. Use --force to run them anyway. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse from collections import deque import configparser import datetime import os import time import shutil import sys import subprocess import tempfile import re import logging import xml.etree.ElementTree as ET import json import threading import multiprocessing from queue import Queue, Empty # Formatting. Default colors to empty strings. BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " if os.name == 'posix': # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') BLUE = ('\033[0m', '\033[0;34m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 NON_SCRIPTS = [ # These are python files that live in the functional tests directory, but are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", ] TEST_PARAMS = { # Some tests can be run with additional parameters. # When a test is listed here, it will be run without parameters # as well as with the additional parameters listed here.
# This: # example "testName" : [["--param1", "--param2"] , ["--param3"]] # will run the test 3 times: # testName # testName --param1 --param2 # testName --param3 "wallet_txn_doublespend.py": [["--mineblock"]], "wallet_txn_clone.py": [["--mineblock"]], "wallet_multiwallet.py": [["--usecli"]], } # Used to limit the number of tests, when list of tests is not provided on command line # When --extended is specified, we run all tests, otherwise # we only run a test if its execution time in seconds does not exceed EXTENDED_CUTOFF DEFAULT_EXTENDED_CUTOFF = 40 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1 class TestCase(): """ Data structure to hold and run information necessary to launch a test case. """ def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None): self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_case = test_case self.test_num = test_num self.flags = flags def run(self, portseed_offset): t = self.test_case portseed = self.test_num + portseed_offset portseed_arg = ["--portseed={}".format(portseed)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = t.split() testdir = os.path.join("{}", "{}_{}").format( self.tmpdir, re.sub(r"\.py$", "", test_argv[0]), portseed) tmpdir_arg = ["--tmpdir={}".format(testdir)] name = t time0 = time.time() process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr) process.wait() log_stdout.seek(0), log_stderr.seek(0) [stdout, stderr] = [l.read().decode('utf-8') for l in (log_stdout, log_stderr)] log_stdout.close(), log_stderr.close() if process.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif process.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" return TestResult(self.test_num, name, testdir, status, int(time.time() - time0), stdout, stderr) def on_ci(): return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') is not None def main(): # Read config generated by configure.
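# The config.ini read here is generated by the build system; schematically
# it provides the keys consumed below (the values shown are illustrative
# placeholders, not real paths):
#   [environment]
#   SRCDIR=/path/to/bitcoin-abc
#   BUILDDIR=/path/to/build
#   EXEEXT=
#   [components]
#   ENABLE_WALLET=true
#   ENABLE_UTILS=true
#   ENABLE_BITCOIND=true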
config = configparser.ConfigParser() configfile = os.path.join(os.path.abspath( os.path.dirname(__file__)), "..", "config.ini") config.read_file(open(configfile, encoding="utf8")) src_dir = config["environment"]["SRCDIR"] build_dir = config["environment"]["BUILDDIR"] tests_dir = os.path.join(src_dir, 'test', 'functional') # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.') parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument( '--exclude', '-x', help='specify a comma-separated list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF, help='set the cutoff runtime for what tests get run') parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS, help='how many test scripts to run in parallel. Default={}.'.format(DEFAULT_JOBS)) parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs') parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs") parser.add_argument('--junitoutput', '-J', default=os.path.join(build_dir, 'junit_results.xml'), help="file that will store JUnit formatted test results.") args, unknown_args = parser.parse_known_args() # args to be passed on always start with two dashes; tests are the # remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] passon_args.append("--configfile={}".format(configfile)) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) # Create base test directory tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format( args.tmpdirprefix, datetime.datetime.now()) os.makedirs(tmpdir) logging.debug("Temporary test directory at {}".format(tmpdir)) enable_wallet = config["components"].getboolean("ENABLE_WALLET") enable_utils = config["components"].getboolean("ENABLE_UTILS") enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if config["environment"]["EXEEXT"] == ".exe" and not args.force: # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 print( "Tests currently disabled on Windows by default. Use --force option to enable") sys.exit(0) if not (enable_wallet and enable_utils and enable_bitcoind): print( "No functional tests to run.
Wallet, utils, and bitcoind must all be enabled") print( "Rerun `configure` with --enable-wallet, --with-utils and --with-daemon and rerun make") sys.exit(0) # Build list of tests all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS) # Check all tests with parameters actually exist for test in TEST_PARAMS: if test not in all_scripts: print("ERROR: Test with parameter {} does not exist, check it has " "not been renamed or deleted".format(test)) sys.exit(1) if tests: # Individual tests have been specified. Run specified tests that exist # in the all_scripts list. Accept the name with or without .py # extension. individual_tests = [ re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')] test_list = [] for t in individual_tests: if t in all_scripts: test_list.append(t) else: print("{}WARNING!{} Test '{}' not found in full test list.".format( BOLD[1], BOLD[0], t)) # Allow for wildcard at the end of the name, so a single input can # match multiple tests for test in tests: if test.endswith('*'): test_list.extend( [t for t in all_scripts if t.startswith(test[:-1])]) # do not cut off explicitly specified tests cutoff = sys.maxsize else: # No individual tests have been specified. # Run all tests that do not exceed the cutoff. test_list = all_scripts cutoff = args.cutoff if args.extended: cutoff = sys.maxsize # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')] for exclude_test in tests_excl: if exclude_test in test_list: test_list.remove(exclude_test) else: print("{}WARNING!{} Test '{}' not found in current test list.".format( BOLD[1], BOLD[0], exclude_test)) # Update timings from build_dir only if separate build directory is used. # We do not want to pollute source directory. build_timings = None if (src_dir != build_dir): build_timings = Timings(os.path.join(build_dir, 'timing.json')) # Always use timings from src_dir if present src_timings = Timings(os.path.join( src_dir, "test", "functional", 'timing.json')) # Add test parameters and remove long running tests if needed test_list = get_tests_to_run( test_list, TEST_PARAMS, cutoff, src_timings) if not test_list: print("No valid test scripts specified. Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script # and exit. parser.print_help() subprocess.check_call( [sys.executable, os.path.join(tests_dir, test_list[0]), '-h']) sys.exit(0) if not args.keepcache: shutil.rmtree(os.path.join(build_dir, "test", "cache"), ignore_errors=True) run_tests(test_list, build_dir, tests_dir, args.junitoutput, tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen, build_timings) def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, enable_coverage=False, args=[], combined_logs_len=0, build_timings=None): # Warn if bitcoind is already running (unix only) try: pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) if pidofOutput is not None and pidofOutput != b'': print("{}WARNING!{} There is already a bitcoind process running on this system.
Tests may fail unexpectedly due to resource contention!".format( BOLD[1], BOLD[0])) except (OSError, subprocess.SubprocessError): pass # Warn if there is a cache directory cache_dir = os.path.join(build_dir, "test", "cache") if os.path.isdir(cache_dir): print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format( BOLD[1], BOLD[0], cache_dir)) flags = ['--cachedir={}'.format(cache_dir)] + args if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug( "Initializing coverage directory at {}".format(coverage.dir)) else: coverage = None if len(test_list) > 1 and num_jobs > 1: # Populate cache try: subprocess.check_output([sys.executable, os.path.join( tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)]) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise # Run Tests time0 = time.time() test_results = execute_test_processes( num_jobs, test_list, tests_dir, tmpdir, flags) runtime = int(time.time() - time0) max_len_name = len(max(test_list, key=len)) print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len) save_results_as_junit(test_results, junitoutput, runtime) if (build_timings is not None): build_timings.save_timings(test_results) if coverage: coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all( map(lambda test_result: test_result.was_successful, test_results)) sys.exit(not all_passed) def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags): update_queue = Queue() job_queue = Queue() test_results = [] poll_timeout = 10 # seconds # In case there is a graveyard of zombie bitcoinds, we can apply a # pseudorandom offset to hopefully jump over them. # (625 is PORT_RANGE/MAX_NODES) portseed_offset = int(time.time() * 1000) % 625 ## # Define some helper functions we will need for threading. ## def handle_message(message, running_jobs): """ handle_message handles a single message from handle_test_cases """ if isinstance(message, TestCase): running_jobs.append((message.test_num, message.test_case)) print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0])) return if isinstance(message, TestResult): test_result = message running_jobs.remove((test_result.num, test_result.name)) test_results.append(test_result) if test_result.status == "Passed": print("{}{}{} passed, Duration: {} s".format( BOLD[1], test_result.name, BOLD[0], test_result.time)) elif test_result.status == "Skipped": print("{}{}{} skipped".format( BOLD[1], test_result.name, BOLD[0])) else: print("{}{}{} failed, Duration: {} s\n".format( BOLD[1], test_result.name, BOLD[0], test_result.time)) print(BOLD[1] + 'stdout:' + BOLD[0]) print(test_result.stdout) print(BOLD[1] + 'stderr:' + BOLD[0]) print(test_result.stderr) return assert False, "we should not be here" def handle_update_messages(): """ handle_update_messages waits for messages to be sent from handle_test_cases via the update_queue. It serializes the results so we can print nice status update messages. """ printed_status = False running_jobs = [] while True: message = None try: message = update_queue.get(True, poll_timeout) if message is None: break # We printed a status message, need to kick to the next line # before printing more. 
if printed_status: print() printed_status = False handle_message(message, running_jobs) update_queue.task_done() except Empty: if not on_ci(): print("Running jobs: {}".format(", ".join([j[1] for j in running_jobs])), end="\r") sys.stdout.flush() printed_status = True def handle_test_cases(): """ handle_test_cases represents a single thread that is part of a worker pool. It waits for a test, then executes that test. It also reports start and result messages to handle_update_messages """ while True: test = job_queue.get() if test is None: break # Signal that the test is starting to inform the poor waiting # programmer update_queue.put(test) result = test.run(portseed_offset) update_queue.put(result) job_queue.task_done() ## # Set up our threads, and start sending tasks ## # Start our result collection thread. t = threading.Thread(target=handle_update_messages) t.setDaemon(True) t.start() # Start some worker threads for j in range(num_jobs): t = threading.Thread(target=handle_test_cases) t.setDaemon(True) t.start() # Push all our test cases into the job queue. for i, t in enumerate(test_list): job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags)) # Wait for all the jobs to be completed job_queue.join() # Wait for all the results to be compiled update_queue.join() # Flush our queues so the threads exit update_queue.put(None) for j in range(num_jobs): job_queue.put(None) return test_results def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len): results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format( "TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] test_results.sort(key=TestResult.sort_key) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) testdir = test_result.testdir if combined_logs_len and os.path.isdir(testdir): # Print the final `combinedlogslen` lines of the combined logs print('{}Combine the logs and print the last {} lines ...{}'.format( BOLD[1], combined_logs_len, BOLD[0])) print('\n============') print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0])) print('============\n') combined_logs, _ = subprocess.Popen([sys.executable, os.path.join( tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate() print("\n".join(deque(combined_logs.splitlines(), combined_logs_len))) status = TICK + "Passed" if all_passed else CROSS + "Failed" if not all_passed: results += RED[1] results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format( "ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] if not all_passed: results += RED[0] results += "Runtime: {} s\n".format(runtime) print(results) class TestResult(): """ Simple data structure to store test result values and print them properly """ def __init__(self, num, name, testdir, status, time, stdout, stderr): self.num = num self.name = name self.testdir = testdir self.status = status self.time = time self.padding = 0 self.stdout = stdout self.stderr = stderr def sort_key(self): if self.status == "Passed": return 0, self.name.lower() elif self.status == "Failed": return 2, self.name.lower() elif self.status == "Skipped": return 1, self.name.lower() def __repr__(self): if self.status == "Passed": color = BLUE glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return color[1] + "{} | {}{} | {} s\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0] @property def was_successful(self): return self.status != "Failed" def get_all_scripts_from_disk(test_dir, non_scripts): """ Return all available test scripts from the script directory (excluding NON_SCRIPTS) """ python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"]) return list(python_files - set(non_scripts)) def get_tests_to_run(test_list, test_params, cutoff, src_timings): """ Returns only tests that will not run longer than cutoff. Long-running tests are returned first to favor running tests in parallel Timings from build directory override those from src directory """ def get_test_time(test): # Return 0 if the test is unknown, to always run it return next( (x['time'] for x in src_timings.existing_timings if x['name'] == test), 0) # Some tests must also be run with additional parameters. Add them to the list. tests_with_params = [] for test_name in test_list: # always execute a test without parameters tests_with_params.append(test_name) params = test_params.get(test_name) if params is not None: tests_with_params.extend( [test_name + " " + " ".join(p) for p in params]) result = [t for t in tests_with_params if get_test_time(t) <= cutoff] result.sort(key=lambda x: (-get_test_time(x), x)) return result class RPCCoverage(): """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `bitcoin-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. See also: test/functional/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir={}'.format(self.dir) def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. """ uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join((" - {}\n".format(i)) for i in sorted(uncovered))) else: print("All RPC commands covered.") def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `test/functional/test_framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.'
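# A sketch of the expected on-disk layout (inferred from the code below,
# not stated explicitly in the original source): the reference file lists
# one RPC command per line, and each test subprocess drops a
# coverage.<suffix> file listing the commands it actually invoked, e.g.
#   <coverage dir>/rpc_interface.txt
#   <coverage dir>/coverage.a1b2c3   (hypothetical suffix)
# The walk below unions the per-test files and subtracts them from the
# reference list.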
coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() covered_cmds = set() if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r', encoding="utf8") as f: all_cmds.update([i.strip() for i in f.readlines()]) for root, dirs, files in os.walk(self.dir): for filename in files: if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r', encoding="utf8") as f: covered_cmds.update([i.strip() for i in f.readlines()]) return all_cmds - covered_cmds def save_results_as_junit(test_results, file_name, time): """ Save test results to file in JUnit format See http://llg.cubic.org/docs/junit/ for specification of format """ e_test_suite = ET.Element("testsuite", {"name": "bitcoin_abc_tests", "tests": str(len(test_results)), # "errors": "failures": str(len([t for t in test_results if t.status == "Failed"])), "id": "0", "skipped": str(len([t for t in test_results if t.status == "Skipped"])), "time": str(time), "timestamp": datetime.datetime.now().isoformat('T') }) for test_result in test_results: e_test_case = ET.SubElement(e_test_suite, "testcase", {"name": test_result.name, "classname": test_result.name, "time": str(test_result.time) } ) if test_result.status == "Skipped": ET.SubElement(e_test_case, "skipped") elif test_result.status == "Failed": ET.SubElement(e_test_case, "failure") # no special element for passed tests ET.SubElement(e_test_case, "system-out").text = test_result.stdout ET.SubElement(e_test_case, "system-err").text = test_result.stderr ET.ElementTree(e_test_suite).write( file_name, "UTF-8", xml_declaration=True) class Timings(): """ Takes care of loading, merging and saving test execution times. """ def __init__(self, timing_file): self.timing_file = timing_file self.existing_timings = self.load_timings() def load_timings(self): if os.path.isfile(self.timing_file): with open(self.timing_file, encoding="utf8") as f: return json.load(f) else: return [] def get_merged_timings(self, new_timings): """ Return new list containing existing timings updated with new timings Tests that do not exist are not removed """ key = 'name' merged = {} for item in self.existing_timings + new_timings: if item[key] in merged: merged[item[key]].update(item) else: merged[item[key]] = item # Sort the result to preserve test ordering in file merged = list(merged.values()) merged.sort(key=lambda t, key=key: t[key]) return merged def save_timings(self, test_results): # we only save tests that have passed - timings for failed tests might be # wrong (timeouts or early fails) passed_results = [t for t in test_results if t.status == 'Passed'] new_timings = list(map(lambda t: {'name': t.name, 'time': t.time}, passed_results)) merged_timings = self.get_merged_timings(new_timings) with open(self.timing_file, 'w', encoding="utf8") as f: json.dump(merged_timings, f, indent=True) if __name__ == '__main__': main() diff --git a/test/functional/wallet_disable.py b/test/functional/wallet_disable.py index d53cfeed6..5a4ce914b 100755 --- a/test/functional/wallet_disable.py +++ b/test/functional/wallet_disable.py @@ -1,39 +1,39 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test a node with the -disablewallet option. - Test that validateaddress RPC works when running with -disablewallet - Test that it is not possible to mine to an invalid address. """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_raises_rpc_error class DisableWalletTest (BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-disablewallet"]] def run_test(self): # Make sure wallet is really disabled assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo) x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') assert x['isvalid'] == False x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') assert x['isvalid'] == True # Checking mining to an address without a wallet. Generating to a valid address should succeed # but generating to an invalid address will fail. self.nodes[0].generatetoaddress( 1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') if __name__ == '__main__': DisableWalletTest().main() diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py index 91caa0209..2b8470af3 100755 --- a/test/functional/wallet_dump.py +++ b/test/functional/wallet_dump.py @@ -1,156 +1,156 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the dumpwallet RPC.""" import os from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error def read_dump(file_name, addrs, script_addrs, hd_master_addr_old): """ Read the given dump, count the addrs that match, count change and reserve. 
Also check that the old hd_master is inactive """ with open(file_name, encoding='utf8') as inputfile: found_addr = 0 found_script_addr = 0 found_addr_chg = 0 found_addr_rsv = 0 hd_master_addr_ret = None for line in inputfile: # only read non comment lines if line[0] != "#" and len(line) > 10: # split out some data key_label, comment = line.split("#") # key = key_label.split(" ")[0] keytype = key_label.split(" ")[2] if len(comment) > 1: addr_keypath = comment.split(" addr=")[1] addr = addr_keypath.split(" ")[0] keypath = None if keytype == "inactivehdseed=1": # ensure the old master is still available assert hd_master_addr_old == addr elif keytype == "hdseed=1": # ensure we have generated a new hd master key assert hd_master_addr_old != addr hd_master_addr_ret = addr elif keytype == "script=1": # scripts don't have keypaths keypath = None else: keypath = addr_keypath.rstrip().split("hdkeypath=")[1] # count key types for addrObj in addrs: if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=": found_addr += 1 break elif keytype == "change=1": found_addr_chg += 1 break elif keytype == "reserve=1": found_addr_rsv += 1 break # count scripts for script_addr in script_addrs: if script_addr == addr.rstrip() and keytype == "script=1": found_script_addr += 1 break return found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret class WalletDumpTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [["-keypool=90"]] def setup_network(self, split=False): # Use 1 minute timeout because the initial getnewaddress RPC can take # longer than the default 30 seconds due to an expensive # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in # the test often takes even longer. 
self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=60) self.start_nodes() def run_test(self): tmpdir = self.options.tmpdir # generate 20 addresses to compare against the dump test_addr_count = 20 addrs = [] for i in range(0, test_addr_count): addr = self.nodes[0].getnewaddress() vaddr = self.nodes[0].getaddressinfo( addr) # required to get hd keypath addrs.append(vaddr) # Should be a no-op: self.nodes[0].keypoolrefill() # Test dumping scripts by adding a 1-of-1 multisig address multisig_addr = self.nodes[0].addmultisigaddress( 1, [addrs[0]["address"]])["address"] # dump unencrypted wallet result = self.nodes[0].dumpwallet( tmpdir + "/node0/wallet.unencrypted.dump") assert_equal(result['filename'], os.path.abspath( tmpdir + "/node0/wallet.unencrypted.dump")) found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \ read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, [multisig_addr], None) # all keys must be in the dump assert_equal(found_addr, test_addr_count) # all scripts must be in the dump assert_equal(found_script_addr, 1) # 50 blocks were mined assert_equal(found_addr_chg, 50) # 90 keys plus 100% internal keys assert_equal(found_addr_rsv, 90 * 2) # encrypt wallet, restart, unlock and dump self.nodes[0].node_encrypt_wallet('test') self.start_node(0) self.nodes[0].walletpassphrase('test', 10) # Should be a no-op: self.nodes[0].keypoolrefill() self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump") found_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \ read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, [multisig_addr], hd_master_addr_unenc) assert_equal(found_addr, test_addr_count) assert_equal(found_script_addr, 1) # old reserve keys are marked as change now assert_equal(found_addr_chg, 90 * 2 + 50) assert_equal(found_addr_rsv, 90 * 2) # Overwriting should fail assert_raises_rpc_error(-8, "already exists", self.nodes[0].dumpwallet, tmpdir + "/node0/wallet.unencrypted.dump") # Restart node with new wallet, and test importwallet self.stop_node(0) self.start_node(0, ['-wallet=w2']) # Make sure the address is not IsMine before import result = self.nodes[0].getaddressinfo(multisig_addr) assert result['ismine'] == False self.nodes[0].importwallet(os.path.abspath( tmpdir + "/node0/wallet.unencrypted.dump")) # Now check IsMine is true result = self.nodes[0].getaddressinfo(multisig_addr) assert result['ismine'] == True if __name__ == '__main__': WalletDumpTest().main() diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py index e3997bfaa..a8e9f9994 100755 --- a/test/functional/wallet_hd.py +++ b/test/functional/wallet_hd.py @@ -1,194 +1,194 @@ #!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function.""" import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, assert_raises_rpc_error ) class WalletHDTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ['-keypool=0']] def run_test(self): # Make sure can't switch off usehd after wallet creation self.stop_node(1) self.nodes[1].assert_start_raises_init_error( ['-usehd=0'], "Error: Error loading : You can't disable HD on an already existing HD wallet") self.start_node(1) connect_nodes_bi(self.nodes[0], self.nodes[1]) # Make sure we use hd, keep masterkeyid masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] assert_equal(masterkeyid, self.nodes[1].getwalletinfo()[ 'hdmasterkeyid']) assert_equal(len(masterkeyid), 40) # create an internal key change_addr = self.nodes[1].getrawchangeaddress() change_addrV = self.nodes[1].getaddressinfo(change_addr) # first internal child key assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") # Import a non-HD private key in the HD wallet non_hd_add = self.nodes[0].getnewaddress() self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add)) # This should be enough to keep the master key and the non-HD key self.nodes[1].backupwallet( os.path.join(self.nodes[1].datadir, "hd.bak")) #self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, "hd.dump")) # Derive some HD addresses and remember the last # Also send funds to each add self.nodes[0].generate(101) hd_add = None NUM_HD_ADDS = 10 for i in range(NUM_HD_ADDS): hd_add = self.nodes[1].getnewaddress() hd_info = self.nodes[1].getaddressinfo(hd_add) assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i) + "'") assert_equal(hd_info["hdseedid"], masterkeyid) assert_equal(hd_info["hdmasterkeyid"], masterkeyid) self.nodes[0].sendtoaddress(hd_add, 1) self.nodes[0].generate(1) self.nodes[0].sendtoaddress(non_hd_add, 1) self.nodes[0].generate(1) # create an internal key (again) change_addr = self.nodes[1].getrawchangeaddress() change_addrV = self.nodes[1].getaddressinfo(change_addr) # second internal child key assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") self.sync_all() assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) self.log.info("Restore backup ...") self.stop_node(1) # we need to delete the complete regtest directory # otherwise node1 would auto-recover all funds in flag the keypool keys as used shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks")) shutil.rmtree(os.path.join( self.nodes[1].datadir, "regtest", "chainstate")) shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join( self.nodes[1].datadir, "regtest", "wallets", "wallet.dat")) self.start_node(1) # Assert that derivation is deterministic hd_add_2 = None for i in range(NUM_HD_ADDS): hd_add_2 = self.nodes[1].getnewaddress() hd_info_2 = self.nodes[1].getaddressinfo(hd_add_2) assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(i) + "'") assert_equal(hd_info_2["hdseedid"], masterkeyid) assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid) assert_equal(hd_add, hd_add_2) connect_nodes_bi(self.nodes[0], self.nodes[1]) self.sync_all() # Needs rescan self.stop_node(1) self.start_node(1, extra_args=self.extra_args[1] + ['-rescan']) assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) # Try a RPC based rescan self.stop_node(1) shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks")) shutil.rmtree(os.path.join( 
self.nodes[1].datadir, "regtest", "chainstate")) shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join( self.nodes[1].datadir, "regtest", "wallets", "wallet.dat")) self.start_node(1, extra_args=self.extra_args[1]) connect_nodes_bi(self.nodes[0], self.nodes[1]) self.sync_all() # Wallet automatically scans blocks older than key on startup assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) out = self.nodes[1].rescanblockchain(0, 1) assert_equal(out['start_height'], 0) assert_equal(out['stop_height'], 1) out = self.nodes[1].rescanblockchain(2, 4) assert_equal(out['start_height'], 2) assert_equal(out['stop_height'], 4) out = self.nodes[1].rescanblockchain(3) assert_equal(out['start_height'], 3) assert_equal(out['stop_height'], self.nodes[1].getblockcount()) out = self.nodes[1].rescanblockchain() assert_equal(out['start_height'], 0) assert_equal(out['stop_height'], self.nodes[1].getblockcount()) assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) # send a tx and make sure its using the internal chain for the changeoutput txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) outs = self.nodes[1].decoderawtransaction( self.nodes[1].gettransaction(txid)['hex'])['vout'] keypath = "" for out in outs: if out['value'] != 1: keypath = self.nodes[1].getaddressinfo( out['scriptPubKey']['addresses'][0])['hdkeypath'] assert_equal(keypath[0:7], "m/0'/1'") # Generate a new HD seed on node 1 and make sure it is set orig_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] self.nodes[1].sethdseed() new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] assert orig_masterkeyid != new_masterkeyid addr = self.nodes[1].getnewaddress() # Make sure the new address is the first from the keypool assert_equal(self.nodes[1].getaddressinfo( addr)['hdkeypath'], 'm/0\'/0\'/0\'') self.nodes[1].keypoolrefill(1) # Fill keypool with 1 key # Set a new HD seed on node 1 without flushing the keypool new_seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress()) orig_masterkeyid = new_masterkeyid self.nodes[1].sethdseed(False, new_seed) new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] assert orig_masterkeyid != new_masterkeyid addr = self.nodes[1].getnewaddress() assert_equal(orig_masterkeyid, self.nodes[1].getaddressinfo( addr)['hdseedid']) # Make sure the new address continues previous keypool assert_equal(self.nodes[1].getaddressinfo( addr)['hdkeypath'], 'm/0\'/0\'/1\'') # Check that the next address is from the new seed self.nodes[1].keypoolrefill(1) next_addr = self.nodes[1].getnewaddress() assert_equal(new_masterkeyid, self.nodes[1].getaddressinfo( next_addr)['hdseedid']) # Make sure the new address is not from previous keypool assert_equal(self.nodes[1].getaddressinfo( next_addr)['hdkeypath'], 'm/0\'/0\'/0\'') assert next_addr != addr # Sethdseed parameter validity assert_raises_rpc_error(-1, 'sethdseed', self.nodes[0].sethdseed, False, new_seed, 0) assert_raises_rpc_error(-5, "Invalid private key", self.nodes[1].sethdseed, False, "not_wif") assert_raises_rpc_error(-1, "JSON value is not a boolean as expected", self.nodes[1].sethdseed, "Not_bool") assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[1].sethdseed, False, True) assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed) assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress())) if __name__ == '__main__': WalletHDTest().main() diff --git 
a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py index 60b6c56e0..eeb393e09 100755 --- a/test/functional/wallet_importmulti.py +++ b/test/functional/wallet_importmulti.py @@ -1,487 +1,487 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the importmulti RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, ) class ImportMultiTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self, split=False): self.setup_nodes() def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(1) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] node0_address1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) # Check only one address assert_equal(node0_address1['ismine'], True) # Node 1 sync test assert_equal(self.nodes[1].getblockcount(), 1) # Address Test - before import address_info = self.nodes[1].getaddressinfo(node0_address1['address']) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) # RPC importmulti ----------------------------------------------- # Bitcoin Address self.log.info("Should import an address") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) watchonly_address = address['address'] watchonly_timestamp = timestamp self.log.info("Should not import an invalid address") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": "not valid address", }, "timestamp": "now", }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Invalid address') # ScriptPubKey + internal self.log.info("Should import a scriptPubKey with internal flag") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "internal": True }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + !internal self.log.info("Should not import a scriptPubKey without internal flag") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) 
assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Address + Public key + !Internal self.log.info("Should import an address with public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "pubkeys": [address['pubkey']] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Public key + internal self.log.info( "Should import a scriptPubKey with internal and with public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "pubkeys": [address['pubkey']], "internal": True }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Public key + !internal self.log.info( "Should not import a scriptPubKey without internal and with public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "pubkeys": [address['pubkey']] }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Address + Private key + !watchonly self.log.info("Should import an address with private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], True) assert_equal(address_assert['timestamp'], timestamp) self.log.info( "Should not import an address with private key if it is already imported") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -4) assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script') # Address + Private key + watchonly self.log.info( "Should not import an address with private key and with watchonly") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])], "watchonly": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # ScriptPubKey + Private key + internal self.log.info( "Should import a scriptPubKey with internal and with private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])], "internal": True }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], True) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Private key + !internal self.log.info( "Should not import a scriptPubKey without internal and with private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # P2SH address sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo( multi_sig_script['address']) assert_equal(address_assert['isscript'], True) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['timestamp'], timestamp) p2shunspent = self.nodes[1].listunspent( 0, 999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], False) # P2SH + Redeem script sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh with respective redeem script") result = 
self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo( multi_sig_script['address']) assert_equal(address_assert['timestamp'], timestamp) p2shunspent = self.nodes[1].listunspent( 0, 999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], True) # P2SH + Redeem script + Private Keys + !Watchonly sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info( "Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'], "keys": [self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo( multi_sig_script['address']) assert_equal(address_assert['timestamp'], timestamp) p2shunspent = self.nodes[1].listunspent( 0, 999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], True) # P2SH + Redeem script + Private Keys + Watchonly sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info( "Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'], "keys": [self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])], "watchonly": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') # Address + Public key + !Internal + Wrong pubkey self.log.info("Should not import an address with a wrong public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "pubkeys": [address2['pubkey']] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) 
assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # ScriptPubKey + Public key + internal + Wrong pubkey self.log.info( "Should not import a scriptPubKey with internal and with a wrong public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "pubkeys": [address2['pubkey']], "internal": True }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Address + Private key + !watchonly + Wrong private key self.log.info("Should not import an address with a wrong private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address2['address'])] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # ScriptPubKey + Private key + internal + Wrong private key self.log.info( "Should not import a scriptPubKey with internal and with a wrong private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address2['address'])], "internal": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Importing existing watch only address with new timestamp should replace saved timestamp. 
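# (Hypothetical values for illustration: if the watch-only address was first
# imported at timestamp 1000 and the chain's mediantime has since advanced to
# 1500, re-importing with "timestamp": "now" should leave the address stamped
# 1500, not 1000.)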
assert_greater_than(timestamp, watchonly_timestamp) self.log.info("Should replace previously saved watch only timestamp.") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": watchonly_address, }, "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(watchonly_address) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) watchonly_timestamp = timestamp # restart nodes to check for proper serialization/deserialization of watch only address self.stop_nodes() self.start_nodes() address_assert = self.nodes[1].getaddressinfo(watchonly_address) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], watchonly_timestamp) # Bad or missing timestamps self.log.info("Should throw on invalid or missing timestamp values") assert_raises_rpc_error(-3, 'Missing required timestamp field for key', self.nodes[1].importmulti, [{ "scriptPubKey": address['scriptPubKey'], }]) assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string', self.nodes[1].importmulti, [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "", }]) if __name__ == '__main__': ImportMultiTest().main() diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py index e509ae990..fb6c828c4 100755 --- a/test/functional/wallet_keypool.py +++ b/test/functional/wallet_keypool.py @@ -1,96 +1,96 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the wallet keypool and interaction with wallet encryption/locking.""" import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class KeyPoolTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def run_test(self): nodes = self.nodes addr_before_encrypting = nodes[0].getnewaddress() addr_before_encrypting_data = nodes[ 0].getaddressinfo(addr_before_encrypting) wallet_info_old = nodes[0].getwalletinfo() assert_equal(wallet_info_old['hdseedid'], wallet_info_old['hdmasterkeyid']) assert addr_before_encrypting_data[ 'hdseedid'] == wallet_info_old['hdseedid'] # Encrypt wallet and wait to terminate nodes[0].node_encrypt_wallet('test') # Restart node 0 self.start_node(0) # Keep creating keys addr = nodes[0].getnewaddress() addr_data = nodes[0].getaddressinfo(addr) wallet_info = nodes[0].getwalletinfo() assert_equal(wallet_info['hdseedid'], wallet_info['hdmasterkeyid']) assert addr_before_encrypting_data[ 'hdseedid'] != wallet_info['hdseedid'] assert addr_data['hdseedid'] == wallet_info['hdseedid'] assert_raises_rpc_error( -12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min) nodes[0].walletpassphrase('test', 12000) nodes[0].keypoolrefill(6) nodes[0].walletlock() wi = nodes[0].getwalletinfo() assert_equal(wi['keypoolsize_hd_internal'], 6) assert_equal(wi['keypoolsize'], 6) # drain the internal keys nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() addr = set() # the next one should fail assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress) # drain the external keys addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) assert len(addr) == 6 # the next one should fail assert_raises_rpc_error( -12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) # refill keypool with three new addresses nodes[0].walletpassphrase('test', 1) nodes[0].keypoolrefill(3) # test walletpassphrase timeout time.sleep(1.1) assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0) # drain them by mining nodes[0].generate(1) nodes[0].generate(1) nodes[0].generate(1) assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1) nodes[0].walletpassphrase('test', 100) nodes[0].keypoolrefill(100) wi = nodes[0].getwalletinfo() assert_equal(wi['keypoolsize_hd_internal'], 100) assert_equal(wi['keypoolsize'], 100) if __name__ == '__main__': KeyPoolTest().main() diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py index f976dfb9f..4d2bcd7f2 100755 --- a/test/functional/wallet_keypool_topup.py +++ b/test/functional/wallet_keypool_topup.py @@ -1,72 +1,72 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test HD Wallet keypool restore function. Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks. - Start node1, shutdown and backup wallet. 
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110. - Stop node1, clear the datadir, move wallet file back into the datadir and restart node1. - connect node1 to node0. Verify that they sync and node1 receives its funds.""" import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, sync_blocks, ) class KeypoolRestoreTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ['-keypool=100']] def run_test(self): wallet_path = os.path.join( self.nodes[1].datadir, "regtest", "wallets", "wallet.dat") wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak") self.nodes[0].generate(101) self.log.info("Make backup of wallet") self.stop_node(1) shutil.copyfile(wallet_path, wallet_backup_path) self.start_node(1, self.extra_args[1]) connect_nodes_bi(self.nodes[0], self.nodes[1]) self.log.info("Generate keys for wallet") for _ in range(90): addr_oldpool = self.nodes[1].getnewaddress() for _ in range(20): addr_extpool = self.nodes[1].getnewaddress() self.log.info("Send funds to wallet") self.nodes[0].sendtoaddress(addr_oldpool, 10) self.nodes[0].generate(1) self.nodes[0].sendtoaddress(addr_extpool, 5) self.nodes[0].generate(1) sync_blocks(self.nodes) self.log.info("Restart node with wallet backup") self.stop_node(1) shutil.copyfile(wallet_backup_path, wallet_path) self.start_node(1, self.extra_args[1]) connect_nodes_bi(self.nodes[0], self.nodes[1]) self.sync_all() self.log.info("Verify keypool is restored and balance is correct") assert_equal(self.nodes[1].getbalance(), 15) assert_equal(self.nodes[1].listtransactions() [0]['category'], "receive") # Check that we have marked all keys up to the used keypool key as used assert_equal(self.nodes[1].getaddressinfo( self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/110'") if __name__ == '__main__': KeypoolRestoreTest().main() diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index eab6917bc..bc96948ea 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -1,288 +1,288 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test multiwallet. 
Verify that a bitcoind node can load multiple wallet files """ import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch from test_framework.util import ( assert_equal, assert_raises_rpc_error, ) class MultiWalletTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.supports_cli = True def run_test(self): node = self.nodes[0] data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p) wallet_dir = lambda *p: data_dir('wallets', *p) def wallet(name): return node.get_wallet_rpc(name) # check wallet.dat is created self.stop_nodes() assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True) # create symlink to verify wallet directory path can be referenced # through symlink os.mkdir(wallet_dir('w7')) os.symlink('w7', wallet_dir('w7_symlink')) # rename wallet.dat to make sure plain wallet file paths (as opposed to # directory paths) can be loaded os.rename(wallet_dir("wallet.dat"), wallet_dir("w8")) # restart node with a mix of wallet names: # w1, w2, w3 - to verify new wallets created when non-existing paths specified # w - to verify wallet name matching works when one wallet path is prefix of another # sub/w5 - to verify relative wallet path is created correctly # extern/w6 - to verify absolute wallet path is created correctly # w7_symlink - to verify symlinked wallet path is initialized correctly # w8 - to verify existing wallet file is loaded correctly # '' - to verify default wallet file is created correctly wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', ''] extra_args = ['-wallet={}'.format(n) for n in wallet_names] self.start_node(0, extra_args) assert_equal(set(node.listwallets()), set(wallet_names)) # check that all requested wallets were created self.stop_node(0) for wallet_name in wallet_names: if os.path.isdir(wallet_dir(wallet_name)): assert_equal(os.path.isfile( wallet_dir(wallet_name, "wallet.dat")), True) else: assert_equal(os.path.isfile(wallet_dir(wallet_name)), True) # should not initialize if wallet path can't be created exp_stderr = "boost::filesystem::create_directory: (The system cannot find the path specified|Not a directory):" self.nodes[0].assert_start_raises_init_error( ['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.nodes[0].assert_start_raises_init_error( ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist') self.nodes[0].assert_start_raises_init_error( ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir()) self.nodes[0].assert_start_raises_init_error( ['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir()) # should not initialize if there are duplicate wallets self.nodes[0].assert_start_raises_init_error( ['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. 
Duplicate -wallet filename specified.') # should not initialize if one wallet is a copy of another shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy')) exp_stderr = r"BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)" self.nodes[0].assert_start_raises_init_error( ['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) # should not initialize if wallet file is a symlink os.symlink('w8', wallet_dir('w8_symlink')) self.nodes[0].assert_start_raises_init_error( ['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX) # should not initialize if the specified walletdir does not exist self.nodes[0].assert_start_raises_init_error( ['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist') # should not initialize if the specified walletdir is not a directory not_a_dir = wallet_dir('notadir') open(not_a_dir, 'a', encoding="utf8").close() self.nodes[0].assert_start_raises_init_error( ['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory') # if wallets/ doesn't exist, datadir should be the default wallet dir wallet_dir2 = data_dir('walletdir') os.rename(wallet_dir(), wallet_dir2) self.start_node(0, ['-wallet=w4', '-wallet=w5']) assert_equal(set(node.listwallets()), {"w4", "w5"}) w5 = wallet("w5") w5.generate(1) # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded os.rename(wallet_dir2, wallet_dir()) self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()]) assert_equal(set(node.listwallets()), {"w4", "w5"}) w5 = wallet("w5") w5_info = w5.getwalletinfo() assert_equal(w5_info['immature_balance'], 50) competing_wallet_dir = os.path.join( self.options.tmpdir, 'competing_walletdir') os.mkdir(competing_wallet_dir) self.restart_node(0, ['-walletdir=' + competing_wallet_dir]) exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!" 
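# node0 was restarted above with -walletdir pointing at competing_walletdir,
# so it still holds that wallet database environment; starting a second node
# (node1) against the same directory is expected to fail while initializing
# the environment.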
self.nodes[1].assert_start_raises_init_error( ['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.restart_node(0, extra_args) wallets = [wallet(w) for w in wallet_names] wallet_bad = wallet("bad") # check wallet names and balances wallets[0].generate(1) for wallet_name, wallet in zip(wallet_names, wallets): info = wallet.getwalletinfo() assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0) assert_equal(info['walletname'], wallet_name) # accessing invalid wallet fails assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo) # accessing wallet RPC without using wallet endpoint fails assert_raises_rpc_error(-19, "Wallet file not specified (must request wallet RPC through /wallet/ uri-path).", node.getwalletinfo) w1, w2, w3, w4, *_ = wallets w1.generate(101) assert_equal(w1.getbalance(), 100) assert_equal(w2.getbalance(), 0) assert_equal(w3.getbalance(), 0) assert_equal(w4.getbalance(), 0) w1.sendtoaddress(w2.getnewaddress(), 1) w1.sendtoaddress(w3.getnewaddress(), 2) w1.sendtoaddress(w4.getnewaddress(), 3) w1.generate(1) assert_equal(w2.getbalance(), 1) assert_equal(w3.getbalance(), 2) assert_equal(w4.getbalance(), 3) batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()]) assert_equal(batch[0]["result"]["chain"], "regtest") assert_equal(batch[1]["result"]["walletname"], "w1") self.log.info('Check for per-wallet settxfee call') assert_equal(w1.getwalletinfo()['paytxfee'], 0) assert_equal(w2.getwalletinfo()['paytxfee'], 0) w2.settxfee(4.0) assert_equal(w1.getwalletinfo()['paytxfee'], 0) assert_equal(w2.getwalletinfo()['paytxfee'], 4.0) self.log.info("Test dynamic wallet loading") self.restart_node(0, ['-nowallet']) assert_equal(node.listwallets(), []) assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo) self.log.info("Load first wallet") loadwallet_name = node.loadwallet(wallet_names[0]) assert_equal(loadwallet_name['name'], wallet_names[0]) assert_equal(node.listwallets(), wallet_names[0:1]) node.getwalletinfo() w1 = node.get_wallet_rpc(wallet_names[0]) w1.getwalletinfo() self.log.info("Load second wallet") loadwallet_name = node.loadwallet(wallet_names[1]) assert_equal(loadwallet_name['name'], wallet_names[1]) assert_equal(node.listwallets(), wallet_names[0:2]) assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo) w2 = node.get_wallet_rpc(wallet_names[1]) w2.getwalletinfo() self.log.info("Load remaining wallets") for wallet_name in wallet_names[2:]: loadwallet_name = self.nodes[0].loadwallet(wallet_name) assert_equal(loadwallet_name['name'], wallet_name) assert_equal(set(self.nodes[0].listwallets()), set(wallet_names)) # Fail to load if wallet doesn't exist assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets') # Fail to load duplicate wallets assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. 
Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0]) # Fail to load if one wallet is a copy of another assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy') # Fail to load if wallet file is a symlink assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink') # Fail to load if a directory is specified that doesn't contain a wallet os.mkdir(wallet_dir('empty_wallet_dir')) assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir') self.log.info("Test dynamic wallet creation.") # Fail to create a wallet if it already exists. assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2') # Successfully create a wallet with a new name loadwallet_name = self.nodes[0].createwallet('w9') assert_equal(loadwallet_name['name'], 'w9') w9 = node.get_wallet_rpc('w9') assert_equal(w9.getwalletinfo()['walletname'], 'w9') assert 'w9' in self.nodes[0].listwallets() # Successfully create a wallet using a full path new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir') new_wallet_name = os.path.join(new_wallet_dir, 'w10') loadwallet_name = self.nodes[0].createwallet(new_wallet_name) assert_equal(loadwallet_name['name'], new_wallet_name) w10 = node.get_wallet_rpc(new_wallet_name) assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name) assert new_wallet_name in self.nodes[0].listwallets() self.log.info("Test dynamic wallet unloading") # Test `unloadwallet` errors assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet) assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy") assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet) assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"), # Successfully unload the specified wallet name self.nodes[0].unloadwallet("w1") assert 'w1' not in self.nodes[0].listwallets() # Successfully unload the wallet referenced by the request endpoint w2.unloadwallet() assert 'w2' not in self.nodes[0].listwallets() # Successfully unload all wallets for wallet_name in self.nodes[0].listwallets(): self.nodes[0].unloadwallet(wallet_name) assert_equal(self.nodes[0].listwallets(), []) assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo) # Successfully load a previously unloaded wallet self.nodes[0].loadwallet('w1') assert_equal(self.nodes[0].listwallets(), ['w1']) assert_equal(w1.getwalletinfo()['walletname'], 'w1') if __name__ == '__main__': MultiWalletTest().main() diff --git a/test/functional/wallet_txn_clone.py b/test/functional/wallet_txn_clone.py index cd05e4d87..0a66660af 100755 --- a/test/functional/wallet_txn_clone.py +++ b/test/functional/wallet_txn_clone.py @@ -1,178 +1,178 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, disconnect_nodes, sync_blocks, ) class TxnMallTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def add_options(self, parser): parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: super(TxnMallTest, self).setup_network() disconnect_nodes(self.nodes[1], self.nodes[2]) disconnect_nodes(self.nodes[2], self.nodes[1]) def run_test(self): output_type = "legacy" # All nodes should start with 1,250 BCH: starting_balance = 1250 for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) # bug workaround, coins generated assigned to first getnewaddress! self.nodes[i].getnewaddress("") # Assign coins to foo and bar accounts: self.nodes[0].settxfee(.001) node0_address_foo = self.nodes[0].getnewaddress("foo", output_type) fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress("bar", output_type) fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(""), starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress("from0") # Send tx1, and another transaction tx2 that won't be cloned txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) # Construct a clone of tx1, to be malleated rawtx1 = self.nodes[0].getrawtransaction(txid1, 1) clone_inputs = [{"txid": rawtx1["vin"][0] ["txid"], "vout":rawtx1["vin"][0]["vout"]}] clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"], rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]} clone_locktime = rawtx1["locktime"] clone_raw = self.nodes[0].createrawtransaction( clone_inputs, clone_outputs, clone_locktime) # createrawtransaction randomizes the order of its outputs, so swap them if necessary. # output 0 is at version+#inputs+input+sigstub+sequence+#outputs # 40 BCH serialized is 00286bee00000000 pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1) hex40 = "00286bee00000000" output_len = 16 + 2 + 2 * \ int("0x" + clone_raw[pos0 + 16: pos0 + 16 + 2], 0) if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0: pos0 + 16] != hex40 or rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0: pos0 + 16] == hex40): output0 = clone_raw[pos0: pos0 + output_len] output1 = clone_raw[pos0 + output_len: pos0 + 2 * output_len] clone_raw = clone_raw[:pos0] + output1 + \ output0 + clone_raw[pos0 + 2 * output_len:] # Use a different signature hash type to sign. This creates an equivalent but malleated clone. 
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransactionwithwallet(
            clone_raw, None, "ALL|FORKID|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)

        # Have node0 mine a block, if requested:
        if self.options.mine_block:
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 50 BCH for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block:
            expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0),
                     1219 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0),
                     29 + tx2["amount"] + tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance(
                "from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])

        # ... mine a block...
        self.nodes[2].generate(1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], self.nodes[2])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Verify expected confirmations: a negative count means the
        # transaction conflicts with a chain that many blocks deep.
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)

        # Check node0's total balance; should be same as before the clone,
        # plus 100 BCH for 2 matured blocks, less possible orphaned matured
        # subsidy
        expected += 100
        if self.options.mine_block:
            expected -= 50
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)

        # Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1 assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"]) # "bar" should have been debited by (possibly unconfirmed) tx2 assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) # "" should have starting balance, less funding txes, plus subsidies assert_equal(self.nodes[0].getbalance("", 0), starting_balance - 1219 + fund_foo_tx["fee"] - 29 + fund_bar_tx["fee"] + 100) # Node1's "from0" account balance assert_equal(self.nodes[1].getbalance( "from0", 0), -(tx1["amount"] + tx2["amount"])) if __name__ == '__main__': TxnMallTest().main() diff --git a/test/functional/wallet_txn_doublespend.py b/test/functional/wallet_txn_doublespend.py index 99dfd6daf..8e70cf5d0 100755 --- a/test/functional/wallet_txn_doublespend.py +++ b/test/functional/wallet_txn_doublespend.py @@ -1,159 +1,159 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet accounts properly when there is a double-spend conflict.""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, disconnect_nodes, find_output, sync_blocks, ) class TxnMallTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def add_options(self, parser): parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: super().setup_network() disconnect_nodes(self.nodes[1], self.nodes[2]) disconnect_nodes(self.nodes[2], self.nodes[1]) def run_test(self): # All nodes should start with 1,250 BCH: starting_balance = 1250 for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) # bug workaround, coins generated assigned to first getnewaddress! 
self.nodes[i].getnewaddress("") # Assign coins to foo and bar accounts: node0_address_foo = self.nodes[0].getnewaddress("foo") fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress("bar") fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(""), starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress("from0") # First: use raw transaction API to send 1240 BCH to node1_address, # but don't broadcast: doublespend_fee = Decimal('-.02') rawtx_input_0 = {} rawtx_input_0["txid"] = fund_foo_txid rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219) rawtx_input_1 = {} rawtx_input_1["txid"] = fund_bar_txid rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29) inputs = [rawtx_input_0, rawtx_input_1] change_address = self.nodes[0].getnewaddress() outputs = {} outputs[node1_address] = 1240 outputs[change_address] = 1248 - 1240 + doublespend_fee rawtx = self.nodes[0].createrawtransaction(inputs, outputs) doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx) assert_equal(doublespend["complete"], True) # Create two spends using 1 50 BCH coin each txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) # Have node0 mine a block: if (self.options.mine_block): self.nodes[0].generate(1) sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50BTC for another # matured block, minus 40, minus 20, and minus transaction fees: expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] if self.options.mine_block: expected += 50 expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) # foo and bar accounts should be debited: assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"]) assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) # Node1's "from0" balance should be both transaction amounts: assert_equal(self.nodes[1].getbalance( "from0"), -(tx1["amount"] + tx2["amount"])) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Now give doublespend and its parents to miner: self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"]) # ... mine a block... 
        self.nodes[2].generate(1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], self.nodes[2])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].gettransaction(
            doublespend_txid)["confirmations"], 2)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)

        # Node0's total balance should be starting balance, plus 100 BCH for
        # two more matured blocks, minus 1240 for the double-spend, plus fees
        # (which are negative):
        expected = starting_balance + 100 - 1240 + \
            fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)

        # Final "" balance is starting_balance - amount moved to accounts
        # - doublespend + subsidies + fees (which are negative)
        assert_equal(self.nodes[0].getbalance("foo"), 1219)
        assert_equal(self.nodes[0].getbalance("bar"), 29)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance
                     - 1219
                     - 29
                     - 1240
                     + 100
                     + fund_foo_tx["fee"]
                     + fund_bar_tx["fee"]
                     + doublespend_fee)

        # Node1's "from0" account balance should be just the doublespend:
        assert_equal(self.nodes[1].getbalance("from0"), 1240)


if __name__ == '__main__':
    TxnMallTest().main()

diff --git a/test/lint/check-doc.py b/test/lint/check-doc.py
index e6d7a6e66..7e1934ebd 100755
--- a/test/lint/check-doc.py
+++ b/test/lint/check-doc.py
@@ -1,70 +1,70 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2019 The Bitcoin Core developers
# Copyright (c) 2019 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks whether all command-line arguments are documented. The return
value is 0 to indicate no error.
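It scans the C++ sources under src/ for argument accessors (the REGEX_ARG
pattern, matching GetArg/GetBoolArg-style calls) and compares them against
AddArg(...) documentation calls (REGEX_DOC); anything used but never
documented is reported.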
Author: @MarcoFalke
'''
from subprocess import check_output
from pprint import PrettyPrinter
import glob
import re

TOP_LEVEL = 'git rev-parse --show-toplevel'
FOLDER_SRC = '/src/**/'
FOLDER_TEST = '/src/**/test/'
EXTENSIONS = ["*.c", "*.h", "*.cpp", "*.cc", "*.hpp"]
REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\(\s*"(-[^"]+)"'
REGEX_DOC = r'AddArg\(\s*"(-[^"=]+?)(?:=|")'

# List of false-positive unknown arguments
SET_FALSE_POSITIVE_UNKNOWNS = set(['-zmqpubhashblock', '-zmqpubhashtx',
                                   '-zmqpubrawblock', '-zmqpubrawtx'])


def main():
    top_level = check_output(TOP_LEVEL, shell=True,
                             universal_newlines=True).strip()
    source_files = []
    test_files = []
    for extension in EXTENSIONS:
        source_files += glob.glob(top_level + FOLDER_SRC + extension,
                                  recursive=True)
        test_files += glob.glob(top_level + FOLDER_TEST + extension,
                                recursive=True)
    files = set(source_files) - set(test_files)

    args_used = set()
    args_docd = set()
    for file in files:
        with open(file, 'r', encoding='utf-8') as f:
            content = f.read()
            args_used |= set(re.findall(re.compile(REGEX_ARG), content))
            args_docd |= set(re.findall(re.compile(REGEX_DOC), content))

    args_used |= SET_FALSE_POSITIVE_UNKNOWNS
    args_need_doc = args_used - args_docd
    args_unknown = args_docd - args_used

    pp = PrettyPrinter()
    print("Args used        : {}".format(len(args_used)))
    print("Args documented  : {}".format(len(args_docd)))
    print("Args undocumented: {}".format(len(args_need_doc)))
    pp.pprint(args_need_doc)
    print("Args unknown     : {}".format(len(args_unknown)))
    pp.pprint(args_unknown)


if __name__ == "__main__":
    main()

diff --git a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py
index 819c03770..3a1f837c7 100755
--- a/test/lint/lint-format-strings.py
+++ b/test/lint/lint-format-strings.py
@@ -1,289 +1,289 @@
#!/usr/bin/env python3
#
-# Copyright (c) 2018 The Bitcoin Core developers
+# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Lint format strings: This program checks that the number of arguments passed
# to a variadic format string function matches the number of format specifiers
# in the format string.

import argparse
import doctest
import re
import sys

FALSE_POSITIVES = [
    ("src/dbwrapper.cpp",
     "vsnprintf(p, limit - p, format, backup_ap)"),
    ("src/index/base.cpp",
     "FatalError(const char *fmt, const Args &... args)"),
    ("src/netbase.cpp",
     "LogConnectFailure(bool manual_connection, const char *fmt, const Args &... args)"),
    ("src/util/system.cpp",
     "strprintf(_(COPYRIGHT_HOLDERS), _(COPYRIGHT_HOLDERS_SUBSTITUTION))"),
    ("src/seeder/main.cpp", "fprintf(stderr, help, argv[0])"),
    ("src/tinyformat.h", "printf(const char *fmt, const Args &... args)"),
    ("src/tinyformat.h", "printf(const char *fmt, TINYFORMAT_VARARGS(n))"),
]

FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS = [
    ("FatalError", 0),
    ("fprintf", 1),
    ("LogConnectFailure", 1),
    ("LogPrint", 1),
    ("LogPrintf", 0),
    ("printf", 0),
    ("snprintf", 2),
    ("sprintf", 1),
    ("strprintf", 0),
    ("vfprintf", 1),
    ("vprintf", 1),
    ("vsnprintf", 1),
    ("vsprintf", 1),
]


def parse_function_calls(function_name, source_code):
    """Return an array with all calls to function function_name in string
    source_code. Preprocessor directives and C++ style comments ("//") in
    source_code are removed.
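    Each returned call string starts at the matched call and runs to the end
    of the flattened source, so callers normally inspect only its prefix (as
    the startswith() examples below do).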
>>> len(parse_function_calls("foo", "foo();bar();foo();bar();")) 2 >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[0].startswith("foo(1);") True >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[1].startswith("foo(2);") True >>> len(parse_function_calls("foo", "foo();bar();// foo();bar();")) 1 >>> len(parse_function_calls("foo", "#define FOO foo();")) 0 """ assert type(function_name) is str and type( source_code) is str and function_name lines = [re.sub("// .*", " ", line).strip() for line in source_code.split("\n") if not line.strip().startswith("#")] return re.findall(r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name), " " + " ".join(lines)) def normalize(s): """Return a normalized version of string s with newlines, tabs and C style comments ("/* ... */") replaced with spaces. Multiple spaces are replaced with a single space. >>> normalize(" /* nothing */ foo\tfoo /* bar */ foo ") 'foo foo foo' """ assert type(s) is str s = s.replace("\n", " ") s = s.replace("\t", " ") s = re.sub(r"/\*.*?\*/", " ", s) s = re.sub(" {2,}", " ", s) return s.strip() ESCAPE_MAP = { r"\n": "[escaped-newline]", r"\t": "[escaped-tab]", r'\"': "[escaped-quote]", } def escape(s): """Return the escaped version of string s with "\\\"", "\\n" and "\\t" escaped as "[escaped-backslash]", "[escaped-newline]" and "[escaped-tab]". >>> unescape(escape("foo")) == "foo" True >>> escape(r'foo \\t foo \\n foo \\\\ foo \\ foo \\"bar\\"') 'foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]' """ assert type(s) is str for raw_value, escaped_value in ESCAPE_MAP.items(): s = s.replace(raw_value, escaped_value) return s def unescape(s): """Return the unescaped version of escaped string s. Reverses the replacements made in function escape(s). >>> unescape(escape("bar")) 'bar' >>> unescape("foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]") 'foo \\\\t foo \\\\n foo \\\\\\\\ foo \\\\ foo \\\\"bar\\\\"' """ assert type(s) is str for raw_value, escaped_value in ESCAPE_MAP.items(): s = s.replace(escaped_value, raw_value) return s def parse_function_call_and_arguments(function_name, function_call): """Split string function_call into an array of strings consisting of: * the string function_call followed by "(" * the function call argument #1 * ... * the function call argument #n * a trailing ");" The strings returned are in escaped form. See escape(...). 
>>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");') ['foo(', '"%s",', ' "foo"', ')'] >>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");') ['foo(', '"%s",', ' "foo"', ')'] >>> parse_function_call_and_arguments("foo", 'foo("%s %s", "foo", "bar");') ['foo(', '"%s %s",', ' "foo",', ' "bar"', ')'] >>> parse_function_call_and_arguments("fooprintf", 'fooprintf("%050d", i);') ['fooprintf(', '"%050d",', ' i', ')'] >>> parse_function_call_and_arguments("foo", 'foo(bar(foobar(barfoo("foo"))), foobar); barfoo') ['foo(', 'bar(foobar(barfoo("foo"))),', ' foobar', ')'] >>> parse_function_call_and_arguments("foo", "foo()") ['foo(', '', ')'] >>> parse_function_call_and_arguments("foo", "foo(123)") ['foo(', '123', ')'] >>> parse_function_call_and_arguments("foo", 'foo("foo")') ['foo(', '"foo"', ')'] """ assert type(function_name) is str and type( function_call) is str and function_name remaining = normalize(escape(function_call)) expected_function_call = "{}(".format(function_name) assert remaining.startswith(expected_function_call) parts = [expected_function_call] remaining = remaining[len(expected_function_call):] open_parentheses = 1 in_string = False parts.append("") for char in remaining: parts.append(parts.pop() + char) if char == "\"": in_string = not in_string continue if in_string: continue if char == "(": open_parentheses += 1 continue if char == ")": open_parentheses -= 1 if open_parentheses > 1: continue if open_parentheses == 0: parts.append(parts.pop()[:-1]) parts.append(char) break if char == ",": parts.append("") return parts def parse_string_content(argument): """Return the text within quotes in string argument. >>> parse_string_content('1 "foo %d bar" 2') 'foo %d bar' >>> parse_string_content('1 foobar 2') '' >>> parse_string_content('1 "bar" 2') 'bar' >>> parse_string_content('1 "foo" 2 "bar" 3') 'foobar' >>> parse_string_content('1 "foo" 2 " " "bar" 3') 'foo bar' >>> parse_string_content('""') '' >>> parse_string_content('') '' >>> parse_string_content('1 2 3') '' """ assert type(argument) is str string_content = "" in_string = False for char in normalize(escape(argument)): if char == "\"": in_string = not in_string elif in_string: string_content += char return string_content def count_format_specifiers(format_string): """Return the number of format specifiers in string format_string. 
>>> count_format_specifiers("foo bar foo") 0 >>> count_format_specifiers("foo %d bar foo") 1 >>> count_format_specifiers("foo %d bar %i foo") 2 >>> count_format_specifiers("foo %d bar %i foo %% foo") 2 >>> count_format_specifiers("foo %d bar %i foo %% foo %d foo") 3 >>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo") 4 """ assert type(format_string) is str n = 0 in_specifier = False for i, char in enumerate(format_string): if format_string[i - 1:i + 1] == "%%" or format_string[i:i + 2] == "%%": pass elif char == "%": in_specifier = True n += 1 elif char in "aAcdeEfFgGinopsuxX": in_specifier = False elif in_specifier and char == "*": n += 1 return n def main(args_in): """ Return a string output with information on string format errors >>> main(["test/lint/lint-format-strings-tests.txt"]) test/lint/lint-format-strings-tests.txt: Expected 1 argument(s) after format string but found 2 argument(s): printf("%d", 1, 2) test/lint/lint-format-strings-tests.txt: Expected 2 argument(s) after format string but found 3 argument(s): printf("%a %b", 1, 2, "anything") test/lint/lint-format-strings-tests.txt: Expected 1 argument(s) after format string but found 0 argument(s): printf("%d") test/lint/lint-format-strings-tests.txt: Expected 3 argument(s) after format string but found 2 argument(s): printf("%a%b%z", 1, "anything") >>> main(["test/lint/lint-format-strings-tests-skip-arguments.txt"]) test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 2 argument(s): fprintf(skipped, "%d", 1, 2) test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 0 argument(s): fprintf(skipped, "%d") test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 2 argument(s): snprintf(skip1, skip2, "%d", 1, 2) test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 0 argument(s): snprintf(skip1, skip2, "%d") test/lint/lint-format-strings-tests-skip-arguments.txt: Could not parse function call string "snprintf(...)": snprintf(skip1, "%d") """ parser = argparse.ArgumentParser(description="This program checks that the number of arguments passed " "to a variadic format string function matches the number of format " "specifiers in the format string.") parser.add_argument("file", type=argparse.FileType( "r", encoding="utf-8"), nargs="*", help="C++ source code file (e.g. 
foo.cpp)") args = parser.parse_args(args_in) for f in args.file: file_content = f.read() for (function_name, skip_arguments) in FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS: for function_call_str in parse_function_calls(function_name, file_content): parts = parse_function_call_and_arguments( function_name, function_call_str) relevant_function_call_str = unescape("".join(parts))[:512] if (f.name, relevant_function_call_str) in FALSE_POSITIVES: continue if len(parts) < 3 + skip_arguments: print("{}: Could not parse function call string \"{}(...)\": {}".format( f.name, function_name, relevant_function_call_str)) continue argument_count = len(parts) - 3 - skip_arguments format_str = parse_string_content(parts[1 + skip_arguments]) format_specifier_count = count_format_specifiers(format_str) if format_specifier_count != argument_count: print("{}: Expected {} argument(s) after format string but found {} argument(s): {}".format( f.name, format_specifier_count, argument_count, relevant_function_call_str)) continue if __name__ == "__main__": doctest.testmod() main(sys.argv[1:])