diff --git a/test/functional/abc-segwit-recovery.py b/test/functional/abc-segwit-recovery.py index 7e4280eb7c..209411d424 100755 --- a/test/functional/abc-segwit-recovery.py +++ b/test/functional/abc-segwit-recovery.py @@ -1,281 +1,280 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks that blocks containing segwit recovery transactions will be accepted, that segwit recovery transactions are rejected from mempool acceptance (even with -acceptnonstdtxn=1), and that segwit recovery transactions don't result in bans. """ import time from test_framework.blocktools import ( create_block, create_coinbase, make_conform_to_ctor, ) from test_framework.messages import ( COIN, COutPoint, CTransaction, CTxIn, CTxOut, ToHex, ) from test_framework.mininode import ( P2PDataStore, ) from test_framework.script import ( CScript, hash160, OP_EQUAL, OP_HASH160, OP_TRUE, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_raises_rpc_error, - sync_blocks, ) TEST_TIME = int(time.time()) # Error due to a non-clean stack CLEANSTACK_ERROR = 'non-mandatory-script-verify-flag (Script did not clean its stack)' RPC_CLEANSTACK_ERROR = CLEANSTACK_ERROR + " (code 64)" EVAL_FALSE_ERROR = 'non-mandatory-script-verify-flag (Script evaluated without error but finished with a false/empty top stack elem' RPC_EVAL_FALSE_ERROR = EVAL_FALSE_ERROR + "ent) (code 64)" class PreviousSpendableOutput(object): def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n class SegwitRecoveryTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} # We have 2 nodes: # 1) node_nonstd (nodes[0]) accepts non-standard txns. It does not # accept segwit recovery transactions, since the rule they violate # is included in the standard flags, and transactions that violate # these flags are never accepted into the mempool. # 2) node_std (nodes[1]) doesn't accept non-standard txns and # doesn't have us whitelisted. It's used to test for bans, as we # connect directly to it via mininode and send a segwit spending # txn. This transaction is non-standard. We check that sending # this transaction doesn't result in a ban. # Nodes are connected to each other, so node_std receives blocks and # transactions that node_nonstd has accepted. Since we are checking # that segwit spending txns do not result in bans, node_nonstd # must not get banned when forwarding this kind of transaction to # node_std. self.extra_args = [['-whitelist=127.0.0.1', "-acceptnonstdtxn"], ["-acceptnonstdtxn=0"]] def next_block(self, number): if self.tip is None: base_block_hash = self.genesis_hash block_time = TEST_TIME else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height) coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) # Do PoW, which is cheap on regtest block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def bootstrap_p2p(self, *, num_connections=1): """Add a P2P connection to the node.
Helper to connect and wait for version handshake.""" for node in self.nodes: for _ in range(num_connections): node.add_p2p_connection(P2PDataStore()) def reconnect_p2p(self, **kwargs): """Tear down and bootstrap the P2P connection to the node. The node gets disconnected several times in this test. This helper method reconnects the p2p and restarts the network thread.""" for node in self.nodes: node.disconnect_p2ps() self.bootstrap_p2p(**kwargs) def run_test(self): self.bootstrap_p2p() self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # shorthand block = self.next_block node_nonstd = self.nodes[0] node_std = self.nodes[1] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # submit current tip and check it was accepted def accepted(node): node.p2p.send_blocks_and_test([self.tip], node) # move the tip back to a previous block def tip(number): self.tip = self.blocks[number] # adds transactions to the block and updates state def update_block(block_number, new_transactions): block = self.blocks[block_number] block.vtx.extend(new_transactions) old_sha256 = block.sha256 make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[ block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # checks the mempool has exactly the same txns as in the provided list def check_mempool_equal(node, txns): assert set(node.getrawmempool()) == set(tx.hash for tx in txns) # Returns 2 transactions: # 1) txfund: creates outputs in segwit addresses # 2) txspend: spends outputs from segwit addresses def create_segwit_fund_and_spend_tx(spend, case0=False): if not case0: # Spending from a P2SH-P2WPKH coin, # txhash:a45698363249312f8d3d93676aa714be59b0bd758e62fa054fb1ea6218480691 redeem_script0 = bytearray.fromhex( '0014fcf9969ce1c98a135ed293719721fb69f0b686cb') # Spending from a P2SH-P2WSH coin, # txhash:6b536caf727ccd02c395a1d00b752098ec96e8ec46c96bee8582be6b5060fa2f redeem_script1 = bytearray.fromhex( '0020fc8b08ed636cb23afcb425ff260b3abd03380a2333b54cfa5d51ac52d803baf4') else: redeem_script0 = bytearray.fromhex('51020000') redeem_script1 = bytearray.fromhex('53020080') redeem_scripts = [redeem_script0, redeem_script1] # Fund transaction to segwit addresses txfund = CTransaction() txfund.vin = [CTxIn(COutPoint(spend.tx.sha256, spend.n))] amount = (50 * COIN - 1000) // len(redeem_scripts) for redeem_script in redeem_scripts: txfund.vout.append( CTxOut(amount, CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL]))) txfund.rehash() # Segwit spending transaction # We'll test if a node that checks for standardness accepts this # txn. It should fail exclusively because of the restriction in # the scriptSig (non-clean stack), so all other characteristics # must pass standardness checks. For this reason, we create # standard P2SH outputs.
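For context on the spend built just below: a segwit recovery input is a bare P2SH spend whose scriptSig pushes only the redeem script, i.e. the witness program. A minimal standalone sketch, using only helpers this test already imports (the redeem script hex is the P2SH-P2WPKH program quoted above):

```python
from test_framework.script import CScript, hash160, OP_EQUAL, OP_HASH160

# The P2SH-P2WPKH witness program used in the non-case0 path above.
redeem_script = bytes.fromhex('0014fcf9969ce1c98a135ed293719721fb69f0b686cb')

# The coin being recovered is an ordinary P2SH output over that program:
p2sh_spk = CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])

# The recovery scriptSig is a single push of the redeem script. P2SH
# evaluation succeeds, but the witness program's stack elements are left
# behind, so CLEANSTACK fails under the standard flags while block
# (consensus) validation still accepts the spend.
recovery_scriptsig = CScript([redeem_script])
```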
txspend = CTransaction() for i in range(len(redeem_scripts)): txspend.vin.append( CTxIn(COutPoint(txfund.sha256, i), CScript([redeem_scripts[i]]))) txspend.vout = [CTxOut(50 * COIN - 2000, CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))] txspend.rehash() return txfund, txspend # Check we are not banned when sending a txn that is rejected. def check_for_no_ban_on_rejected_tx(node, tx, reject_reason): node.p2p.send_txs_and_test( [tx], node, success=False, reject_reason=reject_reason) # Create a new block block(0) save_spendable_output() accepted(node_nonstd) # Now we need that block to mature so we can spend the coinbase. matureblocks = [] for i in range(199): block(5000 + i) matureblocks.append(self.tip) save_spendable_output() node_nonstd.p2p.send_blocks_and_test(matureblocks, node_nonstd) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Create segwit funding and spending transactions txfund, txspend = create_segwit_fund_and_spend_tx(out[0]) txfund_case0, txspend_case0 = create_segwit_fund_and_spend_tx( out[1], True) # Mine txfund, as it can't go into node_std mempool because it's # nonstandard. block(5555) update_block(5555, [txfund, txfund_case0]) accepted(node_nonstd) # Check both nodes are synchronized before continuing. - sync_blocks(self.nodes) + self.sync_blocks() # Check that upgraded nodes checking for standardness are not banning # nodes sending segwit spending txns. check_for_no_ban_on_rejected_tx( node_nonstd, txspend, CLEANSTACK_ERROR) check_for_no_ban_on_rejected_tx( node_nonstd, txspend_case0, EVAL_FALSE_ERROR) check_for_no_ban_on_rejected_tx( node_std, txspend, CLEANSTACK_ERROR) check_for_no_ban_on_rejected_tx( node_std, txspend_case0, EVAL_FALSE_ERROR) # Segwit recovery txns are never accepted into the mempool, # as they are included in standard flags. assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR, node_nonstd.sendrawtransaction, ToHex(txspend)) assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR, node_nonstd.sendrawtransaction, ToHex(txspend_case0)) assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR, node_std.sendrawtransaction, ToHex(txspend)) assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR, node_std.sendrawtransaction, ToHex(txspend_case0)) # Blocks containing segwit spending txns are accepted in both nodes. block(5) update_block(5, [txspend, txspend_case0]) accepted(node_nonstd) - sync_blocks(self.nodes) + self.sync_blocks() if __name__ == '__main__': SegwitRecoveryTest().main() diff --git a/test/functional/abc_p2p_avalanche.py b/test/functional/abc_p2p_avalanche.py index d81a47327e..0a0bab5ca7 100755 --- a/test/functional/abc_p2p_avalanche.py +++ b/test/functional/abc_p2p_avalanche.py @@ -1,221 +1,220 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the resolution of forks via avalanche.""" import random from test_framework.mininode import P2PInterface, mininode_lock from test_framework.messages import ( AvalancheResponse, AvalancheVote, CInv, msg_avapoll, msg_tcpavaresponse, TCPAvalancheResponse, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - sync_blocks, wait_until, ) from test_framework import schnorr BLOCK_ACCEPTED = 0 BLOCK_REJECTED = 1 BLOCK_UNKNOWN = -1 class TestNode(P2PInterface): def __init__(self): self.round = 0 self.avaresponses = [] self.avapolls = [] super().__init__() def on_avaresponse(self, message): with mininode_lock: self.avaresponses.append(message.response) def on_avapoll(self, message): with mininode_lock: self.avapolls.append(message.poll) def send_avaresponse(self, round, votes, privkey): response = AvalancheResponse(round, 0, votes) sig = schnorr.sign(privkey, response.get_hash()) msg = msg_tcpavaresponse() msg.response = TCPAvalancheResponse(response, sig) self.send_message(msg) def send_poll(self, hashes): msg = msg_avapoll() msg.poll.round = self.round self.round += 1 for h in hashes: msg.poll.invs.append(CInv(2, h)) self.send_message(msg) def wait_for_avaresponse(self, timeout=5): wait_until( lambda: len(self.avaresponses) > 0, timeout=timeout, lock=mininode_lock) with mininode_lock: return self.avaresponses.pop(0) def wait_for_avapoll(self, timeout=5): wait_until( lambda: len(self.avapolls) > 0, timeout=timeout, lock=mininode_lock) with mininode_lock: return self.avapolls.pop(0) class AvalancheTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [ ['-enableavalanche=1', '-avacooldown=0'], ['-enableavalanche=1', '-avacooldown=0', '-noparkdeepreorg', '-maxreorgdepth=-1']] def run_test(self): node = self.nodes[0] # Create a fake node and connect it to our real node. poll_node = TestNode() node.add_p2p_connection(poll_node) poll_node.wait_for_verack() poll_node.sync_with_ping() # Get our own node id so we can use it later. nodeid = node.getpeerinfo()[-1]['id'] # Generate many block and poll for them. address = node.get_deterministic_priv_key().address node.generatetoaddress(100, address) fork_node = self.nodes[1] # Make sure the fork node has synced the blocks - sync_blocks([node, fork_node]) + self.sync_blocks([node, fork_node]) # Get the key so we can verify signatures. avakey = bytes.fromhex(node.getavalanchekey()) self.log.info("Poll for the chain tip...") best_block_hash = int(node.getbestblockhash(), 16) poll_node.send_poll([best_block_hash]) def assert_response(expected): response = poll_node.wait_for_avaresponse() r = response.response assert_equal(r.cooldown, 0) # Verify signature. 
assert schnorr.verify(response.sig, avakey, r.get_hash()) votes = r.votes assert_equal(len(votes), len(expected)) for i in range(0, len(votes)): assert_equal(repr(votes[i]), repr(expected[i])) assert_response([AvalancheVote(BLOCK_ACCEPTED, best_block_hash)]) self.log.info("Poll for a selection of blocks...") various_block_hashes = [ int(node.getblockhash(0), 16), int(node.getblockhash(1), 16), int(node.getblockhash(10), 16), int(node.getblockhash(25), 16), int(node.getblockhash(42), 16), int(node.getblockhash(96), 16), int(node.getblockhash(99), 16), int(node.getblockhash(100), 16), ] poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes]) self.log.info( "Poll for a selection of blocks, but some are now invalid...") invalidated_block = node.getblockhash(76) node.invalidateblock(invalidated_block) # We need to send the coin to a new address in order to make sure we do # not regenerate the same block. node.generatetoaddress( 26, 'bchreg:pqv2r67sgz3qumufap3h2uuj0zfmnzuv8v7ej0fffv') node.reconsiderblock(invalidated_block) poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:5]] + [AvalancheVote(BLOCK_REJECTED, h) for h in various_block_hashes[-3:]]) self.log.info("Poll for unknown blocks...") various_block_hashes = [ int(node.getblockhash(0), 16), int(node.getblockhash(25), 16), int(node.getblockhash(42), 16), various_block_hashes[5], various_block_hashes[6], various_block_hashes[7], random.randrange(1 << 255, (1 << 256) - 1), random.randrange(1 << 255, (1 << 256) - 1), random.randrange(1 << 255, (1 << 256) - 1), ] poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:3]] + [AvalancheVote(BLOCK_REJECTED, h) for h in various_block_hashes[3:6]] + [AvalancheVote(BLOCK_UNKNOWN, h) for h in various_block_hashes[-3:]]) self.log.info("Trigger polling from the node...") # duplicate the deterministic sig test from src/test/key_tests.cpp privkey = bytes.fromhex( "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747") pubkey = schnorr.getpubkey(privkey, compressed=True) node.addavalanchepeer(nodeid, pubkey.hex()) # Make sure the fork node has synced the blocks - sync_blocks([node, fork_node]) + self.sync_blocks([node, fork_node]) # Create a fork 2 blocks deep. This should trigger polling. fork_node.invalidateblock(fork_node.getblockhash(100)) fork_address = fork_node.get_deterministic_priv_key().address fork_node.generatetoaddress(2, fork_address) def can_find_block_in_poll(hash): poll = poll_node.wait_for_avapoll() invs = poll.invs votes = [] found_hash = False for inv in invs: # Look for what we expect if inv.hash == hash: found_hash = True # Vote yes to everything votes.append(AvalancheVote(BLOCK_ACCEPTED, inv.hash)) poll_node.send_avaresponse(poll.round, votes, privkey) return found_hash # Because the new tip is a deep reorg, the node should start to poll # for it. hash_to_find = int(fork_node.getbestblockhash(), 16) wait_until(lambda: can_find_block_in_poll(hash_to_find), timeout=5) # To verify that responses are processed, do it a few more times. 
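The loop just below repeats the poll/response round trip. One detail that is easy to miss in send_poll further up is the bare literal 2 passed to CInv: that is the block inventory type, so every avalanche poll in this test asks the peer to vote on block hashes. A small sketch of the message construction (MSG_BLOCK is an assumed name for the constant; the test itself uses the literal):

```python
from test_framework.messages import CInv, msg_avapoll

MSG_BLOCK = 2  # inv type for a block (the test passes the literal 2)

def build_poll(round_number, block_hashes):
    # Mirrors TestNode.send_poll above, without sending anything.
    msg = msg_avapoll()
    msg.poll.round = round_number
    msg.poll.invs = [CInv(MSG_BLOCK, h) for h in block_hashes]
    return msg
```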
for _ in range(10): wait_until(lambda: can_find_block_in_poll(hash_to_find), timeout=5) if __name__ == '__main__': AvalancheTest().main() diff --git a/test/functional/abc_wallet_standardness.py b/test/functional/abc_wallet_standardness.py index 231b0217d5..6f455c1e31 100755 --- a/test/functional/abc_wallet_standardness.py +++ b/test/functional/abc_wallet_standardness.py @@ -1,194 +1,193 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the response of wallet to a variety of weird / nonstandard coins that it might try to spend.""" from decimal import Decimal +from test_framework.messages import ( + CTransaction, + CTxOut, + FromHex, + ToHex, +) from test_framework.script import ( CScript, OP_1, OP_5, OP_CHECKSIG, OP_CHECKMULTISIG, OP_DUP, OP_EQUALVERIFY, OP_HASH160, OP_PUSHDATA1, hash160, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_raises_rpc_error, assert_equal, - sync_blocks, -) -from test_framework.messages import ( - CTransaction, - CTxOut, - FromHex, - ToHex, ) SATOSHI = Decimal('0.00000001') class WalletStandardnessTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [['-acceptnonstdtxn=0'], ['-acceptnonstdtxn=1']] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): std_node, nonstd_node = self.nodes address_nonstd = nonstd_node.getnewaddress() # make and mature some coins for the nonstandard node nonstd_node.generate(120) - sync_blocks(self.nodes) + self.sync_blocks() def fund_and_test_wallet(scriptPubKey, shouldBeStandard, shouldBeInWallet, amount=10000, spendfee=500, nonstd_error="scriptpubkey (code 64)", sign_error=None): """ Get the nonstandard node to fund a transaction, test its standardness by trying to broadcast on the standard node, then mine it and see if it ended up in the standard node's wallet. Finally, it attempts to spend the coin. """ self.log.info("Trying script {}".format(scriptPubKey.hex(),)) # get nonstandard node to fund the script tx = CTransaction() tx.vout.append(CTxOut(max(amount, 10000), scriptPubKey)) rawtx = nonstd_node.fundrawtransaction( ToHex(tx), {'lockUnspents': True, 'changePosition': 1})['hex'] # fundrawtransaction doesn't like to fund dust outputs, so we # have to manually override the amount. 
FromHex(tx, rawtx) tx.vout[0].nValue = min(amount, 10000) rawtx = nonstd_node.signrawtransactionwithwallet(ToHex(tx))['hex'] # ensure signing process did not disturb scriptPubKey signedtx = FromHex(CTransaction(), rawtx) assert_equal(scriptPubKey, signedtx.vout[0].scriptPubKey) txid = signedtx.rehash() balance_initial = std_node.getbalance() # try broadcasting it on the standard node if shouldBeStandard: std_node.sendrawtransaction(rawtx) assert txid in std_node.getrawmempool() else: assert_raises_rpc_error(-26, nonstd_error, std_node.sendrawtransaction, rawtx) assert txid not in std_node.getrawmempool() # make sure it's in nonstandard node's mempool, then mine it nonstd_node.sendrawtransaction(rawtx) assert txid in nonstd_node.getrawmempool() [blockhash] = nonstd_node.generate(1) # make sure it was mined assert txid in nonstd_node.getblock(blockhash)["tx"] - sync_blocks(self.nodes) + self.sync_blocks() wallet_outpoints = {(entry['txid'], entry['vout']) for entry in std_node.listunspent()} # calculate wallet balance change just as a double check balance_change = std_node.getbalance() - balance_initial if shouldBeInWallet: assert (txid, 0) in wallet_outpoints assert balance_change == amount * SATOSHI else: assert (txid, 0) not in wallet_outpoints assert balance_change == 0 # try spending the funds using the wallet. outamount = (amount - spendfee) * SATOSHI if outamount < 546 * SATOSHI: # If the final amount would be too small, then just donate # to miner fees. outputs = [{"data": b"to miner, with love".hex()}] else: outputs = [{address_nonstd: outamount}] spendtx = std_node.createrawtransaction( [{'txid': txid, 'vout': 0}], outputs) signresult = std_node.signrawtransactionwithwallet(spendtx) if sign_error is None: assert_equal(signresult['complete'], True) txid = std_node.sendrawtransaction(signresult['hex']) [blockhash] = std_node.generate(1) # make sure it was mined assert txid in std_node.getblock(blockhash)["tx"] - sync_blocks(self.nodes) + self.sync_blocks() else: assert_equal(signresult['complete'], False) assert_equal(signresult['errors'][0]['error'], sign_error) # we start with an empty wallet assert_equal(std_node.getbalance(), 0) address = std_node.getnewaddress() pubkey = bytes.fromhex(std_node.getaddressinfo(address)['pubkey']) pubkeyhash = hash160(pubkey) # P2PK fund_and_test_wallet(CScript([pubkey, OP_CHECKSIG]), True, True) fund_and_test_wallet( CScript([OP_PUSHDATA1, pubkey, OP_CHECKSIG]), False, False, sign_error='Data push larger than necessary') # P2PKH fund_and_test_wallet(CScript( [OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG]), True, True) # The signing error changes here since the script check (with empty # scriptSig) hits OP_DUP before it hits the nonminimal push; in all # other cases we hit the nonminimal push first. 
fund_and_test_wallet(CScript( [OP_DUP, OP_HASH160, OP_PUSHDATA1, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG]), False, False, sign_error='Unable to sign input, invalid stack size (possibly missing key)') # Bare multisig fund_and_test_wallet( CScript([OP_1, pubkey, OP_1, OP_CHECKMULTISIG]), True, False) fund_and_test_wallet( CScript([OP_1, OP_PUSHDATA1, pubkey, OP_1, OP_CHECKMULTISIG]), False, False, sign_error='Data push larger than necessary') fund_and_test_wallet( CScript([OP_1, pubkey, b'\x01', OP_CHECKMULTISIG]), False, False, sign_error='Data push larger than necessary') fund_and_test_wallet( CScript([b'\x01', pubkey, OP_1, OP_CHECKMULTISIG]), False, False, sign_error='Data push larger than necessary') # Note: 1-of-5 is nonstandard to fund yet is standard to spend. However, # trying to spend it with our wallet in particular will generate # too-dense sigchecks since our wallet currently only signs with ECDSA # (Schnorr would not have this issue). fund_and_test_wallet( CScript([OP_1, pubkey, pubkey, pubkey, pubkey, pubkey, OP_5, OP_CHECKMULTISIG]), False, False, sign_error='Input SigChecks limit exceeded') fund_and_test_wallet( CScript([OP_1, pubkey, pubkey, pubkey, OP_PUSHDATA1, pubkey, pubkey, OP_5, OP_CHECKMULTISIG]), False, False, sign_error='Data push larger than necessary') # Dust also is nonstandard to fund but standard to spend. fund_and_test_wallet( CScript([pubkey, OP_CHECKSIG]), False, True, amount=200, nonstd_error="dust (code 64)") # and we end with an empty wallet assert_equal(std_node.getbalance(), 0) if __name__ == '__main__': WalletStandardnessTest().main() diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index 61701fb0cb..c0ee2fa4a0 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -1,494 +1,493 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test BIP68 implementation.""" import time from test_framework.blocktools import ( create_block, create_coinbase, ) from test_framework.messages import ( COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex, ) from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, satoshi_round, - sync_blocks, ) SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31) # this means use time (0 means height) SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22) # this is a bit-shift SEQUENCE_LOCKTIME_GRANULARITY = 9 SEQUENCE_LOCKTIME_MASK = 0x0000ffff # RPC error for non-BIP68 final transactions NOT_FINAL_ERROR = "non-BIP68-final (code 64)" class BIP68Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [["-noparkdeepreorg", "-maxreorgdepth=-1", "-acceptnonstdtxn=1"], ["-acceptnonstdtxn=0", "-maxreorgdepth=-1"]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Generate some coins self.nodes[0].generate(110) self.log.info("Running test disable flag") self.test_disable_flag() self.log.info("Running test sequence-lock-confirmed-inputs") self.test_sequence_lock_confirmed_inputs() self.log.info("Running test sequence-lock-unconfirmed-inputs") self.test_sequence_lock_unconfirmed_inputs() self.log.info( "Running test BIP68 not consensus before versionbits activation") self.test_bip68_not_consensus() self.log.info("Activating BIP68 (and 112/113)") self.activateCSV() print("Verifying nVersion=2 transactions are standard.") print("Note that with current versions of bitcoin software, nVersion=2 transactions are always standard (independent of BIP68 activation status).") self.test_version2_relay() self.log.info("Passed") # Test that BIP68 is not in effect if tx version is 1, or if # the first sequence bit is set. def test_disable_flag(self): # Create some unconfirmed inputs new_addr = self.nodes[0].getnewaddress() # send 2 BCH self.nodes[0].sendtoaddress(new_addr, 2) utxos = self.nodes[0].listunspent(0, 0) assert len(utxos) > 0 utxo = utxos[0] tx1 = CTransaction() value = int(satoshi_round(utxo["amount"] - self.relayfee) * COIN) # Check that the disable flag disables relative locktime. # If sequence locks were used, this would require 1 block for the # input to mature. sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [ CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] pad_tx(tx1) tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))[ "hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) # This transaction will enable sequence-locks, so this transaction should # fail tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx2) tx2.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. tx2.nVersion = 1 self.nodes[0].sendrawtransaction(ToHex(tx2)) # Calculate the median time past of a prior block ("confirmations" before # the current tip). 
def get_median_time_past(self, confirmations): block_hash = self.nodes[0].getblockhash( self.nodes[0].getblockcount() - confirmations) return self.nodes[0].getblockheader(block_hash)["mediantime"] # Test that sequence locks are respected for transactions spending # confirmed inputs. def test_sequence_lock_confirmed_inputs(self): # Create lots of confirmed utxos, and use them to generate lots of random # transactions. max_outputs = 50 addresses = [] while len(addresses) < max_outputs: addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: import random random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} for i in range(num_outputs): outputs[addresses[i]] = random.randint(1, 20) * 0.01 self.nodes[0].sendmany("", outputs) self.nodes[0].generate(1) utxos = self.nodes[0].listunspent() # Try creating a lot of random transactions. # Each time, choose a random number of inputs, and randomly set # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. for i in range(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) # Track whether any sequence locks used should fail should_pass = True # Track whether this transaction was built with sequence locks using_sequence_locks = False tx = CTransaction() tx.nVersion = 2 value = 0 for j in range(num_inputs): # this disables sequence locks sequence_value = 0xfffffffe # 50% chance we enable sequence locks if random.randint(0, 1): using_sequence_locks = True # 10% of the time, make the input sequence value pass input_will_pass = (random.randint(1, 10) == 1) sequence_value = utxos[j]["confirmations"] if not input_will_pass: sequence_value += 1 should_pass = False # Figure out what the median-time-past was for the confirmed input # Note that if an input has N confirmations, we're going back N blocks # from the tip so that we're looking up MTP of the block # PRIOR to the one the input appears in, as per the BIP68 # spec. 
orig_time = self.get_median_time_past( utxos[j]["confirmations"]) # MTP of the tip cur_time = self.get_median_time_past(0) # can only timelock this input if it's not too old -- # otherwise use height can_time_lock = True if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time # lock if random.randint(0, 1) and can_time_lock: # Find first time-lock value that fails, or latest one # that succeeds time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) elif (not input_will_pass and time_delta <= cur_time - orig_time): sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append( CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"] * COIN # Overestimate the size of the tx - signatures should be less than # 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append( CTxOut(int(value - self.relayfee * tx_size * COIN / 1000), CScript([b'a']))) rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))[ "hex"] if (using_sequence_locks and not should_pass): # This transaction should be rejected assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) else: # This raw transaction should be accepted self.nodes[0].sendrawtransaction(rawtx) utxos = self.nodes[0].listunspent() # Test that sequence locks on unconfirmed inputs must have nSequence # height or time of 0 to be accepted. # Then test that BIP68-invalid transactions are removed from the mempool # after a reorg. def test_sequence_lock_unconfirmed_inputs(self): # Store height so we can easily reset the chain at the end of the test cur_height = self.nodes[0].getblockcount() # Create a mempool tx. txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # As the fees are calculated prior to the transaction being signed, # there is some uncertainty that calculate fee provides the correct # minimal fee. Since regtest coins are free, let's go ahead and # increase the fee by an order of magnitude to ensure this test # passes. fee_multiplier = 10 # Anyone-can-spend mempool tx. # Sequence lock of 0 should pass. tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(0), CScript([b'a']))] tx2.vout[0].nValue = tx1.vout[0].nValue - \ fee_multiplier * self.nodes[0].calculate_fee(tx2) tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) # Create a spend of the 0th output of orig_tx with a sequence lock # of 1, and test what happens when submitting. 
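Stepping back before that helper: the boundary search in the time-lock branch above condenses to picking the largest 512-second-granular value still satisfied by the elapsed median time past, or the smallest one that is not. A sketch, using the constants from the top of this file, with elapsed = cur_time - orig_time:

```python
def boundary_time_lock(elapsed, should_pass):
    # Largest granular lock satisfied by `elapsed`, or the first failing
    # one, matching the input_will_pass branches above.
    value = elapsed >> SEQUENCE_LOCKTIME_GRANULARITY
    if not should_pass:
        value += 1
    return value | SEQUENCE_LOCKTIME_TYPE_FLAG
```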
# orig_tx.vout[0] must be an anyone-can-spend output def test_nonzero_locks(orig_tx, node, use_height_lock): sequence_value = 1 if not use_height_lock: sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx = CTransaction() tx.nVersion = 2 tx.vin = [ CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [ CTxOut(int(orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx)), CScript([b'a']))] pad_tx(tx) tx.rehash() if (orig_tx.hash in node.getrawmempool()): # sendrawtransaction should fail if the tx is in the mempool assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) else: # sendrawtransaction should succeed if the tx is not in the # mempool node.sendrawtransaction(ToHex(tx)) return tx test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction( txid=tx2.hash, fee_delta=-fee_multiplier * self.nodes[0].calculate_fee(tx2)) cur_time = int(time.time()) for i in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 assert tx2.hash in self.nodes[0].getrawmempool() test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction( txid=tx2.hash, fee_delta=fee_multiplier * self.nodes[0].calculate_fee(tx2)) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) assert tx2.hash not in self.nodes[0].getrawmempool() # Now that tx2 is not in the mempool, a sequence locked spend should # succeed tx3 = test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) assert tx3.hash in self.nodes[0].getrawmempool() self.nodes[0].generate(1) assert tx3.hash not in self.nodes[0].getrawmempool() # One more test, this time using height locks tx4 = test_nonzero_locks( tx3, self.nodes[0], use_height_lock=True) assert tx4.hash in self.nodes[0].getrawmempool() # Now try combining confirmed and unconfirmed inputs tx5 = test_nonzero_locks( tx4, self.nodes[0], use_height_lock=True) assert tx5.hash not in self.nodes[0].getrawmempool() utxos = self.nodes[0].listunspent() tx5.vin.append( CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"] * COIN) raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"] assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) # Test mempool-BIP68 consistency after reorg # # State of the transactions in the last blocks: # ... -> [ tx2 ] -> [ tx3 ] # tip-1 tip # And currently tx4 is in the mempool. # # If we invalidate the tip, tx3 should get added to the mempool, causing # tx4 to be removed (fails sequence-lock). self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) assert tx4.hash not in self.nodes[0].getrawmempool() assert tx3.hash in self.nodes[0].getrawmempool() # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. 
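The evictions described above all follow from one rule: for an nVersion >= 2 transaction, an input spending an unconfirmed (mempool) parent is only BIP68-final with a lock value of 0, because the parent's height/time baseline is effectively "now". A sketch of that predicate, using the constants from the top of this file:

```python
def final_given_mempool_parent(tx_version, sequence_value):
    # BIP68 does not apply to v1 transactions or to disabled inputs.
    if tx_version < 2 or sequence_value & SEQUENCE_LOCKTIME_DISABLE_FLAG:
        return True
    # With the parent unconfirmed, any nonzero relative lock is still
    # maturing, so only a zero lock value is final.
    return (sequence_value & SEQUENCE_LOCKTIME_MASK) == 0
```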
tip = int(self.nodes[0].getblockhash( self.nodes[0].getblockcount() - 1), 16) height = self.nodes[0].getblockcount() for i in range(2): block = create_block(tip, create_coinbase(height), cur_time) block.nVersion = 3 block.rehash() block.solve() tip = block.sha256 height += 1 self.nodes[0].submitblock(ToHex(block)) cur_time += 1 mempool = self.nodes[0].getrawmempool() assert tx3.hash not in mempool assert tx2.hash in mempool # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) self.nodes[0].invalidateblock( self.nodes[0].getblockhash(cur_height + 1)) self.nodes[0].generate(10) def get_csv_status(self): height = self.nodes[0].getblockchaininfo()['blocks'] return height >= 576 # Make sure that BIP68 isn't being used to validate blocks, prior to # versionbits activation. If more blocks are mined prior to this test # being run, then it's possible the test has activated the soft fork, and # this test should be moved to run earlier, or deleted. def test_bip68_not_consensus(self): assert_equal(self.get_csv_status(), False) txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] # sign tx2 tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) pad_tx(tx2) tx2.rehash() self.nodes[0].sendrawtransaction(ToHex(tx2)) # Now make an invalid spend of tx2 according to BIP68 # 100 block relative locktime sequence_value = 100 tx3 = CTransaction() tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [ CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx3) tx3.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) block = create_block( tip, create_coinbase(self.nodes[0].getblockcount() + 1)) block.nVersion = 3 block.vtx.extend( sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id())) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].submitblock(ToHex(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) def activateCSV(self): # activation should happen at block height 576 csv_activation_height = 576 height = self.nodes[0].getblockcount() assert_greater_than(csv_activation_height - height, 1) self.nodes[0].generate(csv_activation_height - height - 1) assert_equal(self.get_csv_status(), False) disconnect_nodes(self.nodes[0], self.nodes[1]) self.nodes[0].generate(1) assert_equal(self.get_csv_status(), True) # We have a block that has CSV activated, but we want to be at # the activation point, so we invalidate the tip. self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) connect_nodes(self.nodes[0], self.nodes[1]) - sync_blocks(self.nodes) + self.sync_blocks() # Use self.nodes[1] to test that version 2 transactions are standard. 
def test_version2_relay(self): inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.0} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] tx = FromHex(CTransaction(), rawtxfund) tx.nVersion = 2 tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))[ "hex"] self.nodes[1].sendrawtransaction(tx_signed) if __name__ == '__main__': BIP68Test().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index af57df6c6c..ff1790537f 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -1,536 +1,535 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the pruning code. WARNING: This test uses 4GB of disk space. This test takes 30 mins or more (up to 2 hours) """ import os from test_framework.blocktools import create_coinbase from test_framework.messages import CBlock, ToHex from test_framework.script import CScript, OP_RETURN, OP_NOP from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, - sync_blocks, wait_until, ) MIN_BLOCKS_TO_KEEP = 288 # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. TIMESTAMP_WINDOW = 2 * 60 * 60 def mine_large_blocks(node, n): # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase # transaction but is consensus valid. # Set the nTime if this is the first time this function has been called. # A static variable ensures that time is monotonically increasing and is therefore # different for each block created => blockhash is unique. if "nTime" not in mine_large_blocks.__dict__: mine_large_blocks.nTime = 0 # Get the block parameters for the first block big_script = CScript([OP_RETURN] + [OP_NOP] * 950000) best_block = node.getblock(node.getbestblockhash()) height = int(best_block["height"]) + 1 mine_large_blocks.nTime = max( mine_large_blocks.nTime, int(best_block["time"])) + 1 previousblockhash = int(best_block["hash"], 16) for _ in range(n): # Build the coinbase transaction (with large scriptPubKey) coinbase_tx = create_coinbase(height) coinbase_tx.vin[0].nSequence = 2 ** 32 - 1 coinbase_tx.vout[0].scriptPubKey = big_script coinbase_tx.rehash() # Build the block block = CBlock() block.nVersion = best_block["version"] block.hashPrevBlock = previousblockhash block.nTime = mine_large_blocks.nTime block.nBits = int('207fffff', 16) block.nNonce = 0 block.vtx = [coinbase_tx] block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Submit to the node node.submitblock(ToHex(block)) previousblockhash = block.sha256 height += 1 mine_large_blocks.nTime += 1 def calc_usage(blockdir): return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 # Create nodes 0 and 1 to mine. # Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-prune=550"]] self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() self.prunedir = os.path.join( self.nodes[2].datadir, 'regtest', 'blocks', '') connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[0], self.nodes[4]) - sync_blocks(self.nodes[0:5]) + self.sync_blocks(self.nodes[0:5]) def setup_nodes(self): self.add_nodes(self.num_nodes, self.extra_args) self.start_nodes() for n in self.nodes: n.importprivkey( privkey=n.get_deterministic_priv_key().key, label='coinbase', rescan=False) def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) - sync_blocks(self.nodes[0:2]) + self.sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data mine_large_blocks(self.nodes[0], 645) - sync_blocks(self.nodes[0:5]) + self.sync_blocks(self.nodes[0:5]) def test_height_min(self): assert os.path.isfile(os.path.join( self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early" self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: {}".format( calc_usage(self.prunedir))) self.log.info( "Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full # blocks past the height cutoff will ensure this mine_large_blocks(self.nodes[0], 25) # Wait for blk00000.dat to be pruned wait_until(lambda: not os.path.isfile( os.path.join(self.prunedir, "blk00000.dat")), timeout=30) self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) assert_greater_than(550, usage) def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks self.log.info( "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and # then reorg's when node0 reconnects disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[0], self.nodes[2]) # Mine 24 blocks in node 1 mine_large_blocks(self.nodes[1], 24) # Reorg back with 25 block chain from node 0 mine_large_blocks(self.nodes[0], 25) # Create connections in the order so both nodes can see the reorg # at the same time connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[0], self.nodes[2]) - sync_blocks(self.nodes[0:3]) + self.sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: 
{}".format( calc_usage(self.prunedir))) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring # 288 blocks of undo data to the reorg_test chain. height = self.nodes[1].getblockcount() self.log.info("Current block height: {}".format(height)) self.forkheight = height - 287 self.forkhash = self.nodes[1].getblockhash(self.forkheight) self.log.info("Invalidating block {} at height {}".format( self.forkhash, self.forkheight)) self.nodes[1].invalidateblock(self.forkhash) # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want. # So invalidate that fork as well, until we're on the same chain as # node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1) curhash = self.nodes[1].getblockhash(self.forkheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(self.forkheight - 1) assert self.nodes[1].getblockcount() == self.forkheight - 1 self.log.info("New best height: {}".format( self.nodes[1].getblockcount())) # Disconnect node1 and generate the new chain disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[1], self.nodes[2]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) - sync_blocks(self.nodes[0:3], timeout=120) + self.sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: {}".format( self.nodes[2].getblockcount())) self.log.info("Usage possibly still high because of stale blocks in block files: {}".format( calc_usage(self.prunedir))) self.log.info( "Mine 220 more large blocks so we have requisite history") mine_large_blocks(self.nodes[0], 220) - sync_blocks(self.nodes[0:3], timeout=120) + self.sync_blocks(self.nodes[0:3], timeout=120) usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) assert_greater_than(550, usage) def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error( -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) self.log.info( "Will need to redownload block {}".format(self.forkheight)) # Verify that we have enough history to reorg back to the fork point. # Although this is more than 288 blocks, because this chain was written # more recently and only its other 299 small and 220 large blocks are in # the block files after it, it is expected to still be retained. self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to # the original chain created in create_chain_with_stale_blocks because # it doesn't know of any peer that's on that chain from which to # redownload its missing blocks. Invalidate the reorg_test chain in # node 0 as well, it can successfully switch to the original chain # because it has all the block data. However it must mine enough blocks # to have a more work chain than the reorg_test chain in order to # trigger node 2's block download logic. 
At this point node 2 is within # 288 blocks of the fork point so it will preserve its ability to # reorg. if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight self.log.info( "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {}".format( blocks_to_mine)) self.nodes[0].invalidateblock(curchainhash) assert_equal(self.nodes[0].getblockcount(), self.mainchainheight) assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info( "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") # Wait for Node 2 to reorg to proper height wait_until(lambda: self.nodes[2].getblockcount( ) >= goalbestheight, timeout=900) assert_equal(self.nodes[2].getbestblockhash(), goalbesthash) # Verify we can now have the data for a block previously pruned assert_equal(self.nodes[2].getblock( self.forkhash)["height"], self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode self.start_node(node_number) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) # now re-start in manual pruning mode self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=1"]) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))[ "time"] + TIMESTAMP_WINDOW else: return index def prune(index, expected_ret=None): ret = node.pruneblockchain(height=height(index)) # Check the return value. When use_timestamp is True, just check # that the return value is less than or equal to the expected # value, because when more than one block is generated per second, # a timestamp will not be granular enough to uniquely identify an # individual block. 
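As context for the tolerance described above: pruneblockchain accepts either a height or a unix timestamp, and the TIMESTAMP_WINDOW offset keeps the blocks that a rescan of a freshly created key could still need. A usage sketch of the same mapping that height() performs in timestamp mode (node and the index are assumed inputs):

```python
def prune_by_timestamp(node, index, window=2 * 60 * 60):
    # Map a height to its block time, add the rescan window, and prune.
    # pruneblockchain returns the height of the last block pruned, which
    # is why the timestamp-mode checks below allow some slack when
    # several blocks share the same second.
    target = node.getblockheader(node.getblockhash(index))["time"] + window
    return node.pruneblockchain(target)
```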
if expected_ret is None: expected_ret = index if use_timestamp: assert_greater_than(ret, 0) assert_greater_than(expected_ret + 1, ret) else: assert_equal(ret, expected_ret) def has_block(index): return os.path.isfile(os.path.join( self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index))) # should not prune because chain tip of node 3 (995) < PruneAfterHeight # (1000) assert_raises_rpc_error( -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # Save block transaction count before pruning, assert value block1_details = node.getblock(node.getblockhash(1)) assert_equal(block1_details["nTx"], len(block1_details["tx"])) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # Pruned block should still know the number of transactions assert_equal(node.getblockheader(node.getblockhash(1)) ["nTx"], block1_details["nTx"]) # negative heights should raise an exception assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) assert has_block( 0), "blk00000.dat is missing when should still be there" # Does nothing node.pruneblockchain(height(0)) assert has_block( 0), "blk00000.dat is missing when should still be there" # height=500 should prune first file prune(500) assert not has_block( 0), "blk00000.dat is still there, should be pruned by now" assert has_block( 1), "blk00001.dat is missing when should still be there" # height=650 should prune second file prune(650) assert not has_block( 1), "blk00001.dat is still there, should be pruned by now" # height=1000 should not prune anything more, because tip-288 is in # blk00002.dat. prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) assert has_block( 2), "blk00002.dat is missing when should still be there" # advance the tip so blk00002.dat and blk00003.dat can be pruned (the # last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) assert not has_block( 2), "blk00002.dat is still there, should be pruned by now" assert not has_block( 3), "blk00003.dat is still there, should be pruned by now" # stop node, start back up with auto-prune at 550MB, make sure still # runs self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=550"]) self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape self.log.info("Stop and start pruning node to trigger wallet rescan") self.stop_node(2) self.start_node( 2, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], self.nodes[5]) nds = [self.nodes[0], self.nodes[5]] - sync_blocks(nds, wait=5, timeout=300) + self.sync_blocks(nds, wait=5, timeout=300) # Stop and start to trigger rescan self.stop_node(5) self.start_node( 5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") def run_test(self): self.log.info("Warning!
This test requires 4GB of disk space") self.log.info("Mining a big blockchain of 995 blocks") self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning node with 995 blocks self.stop_node(3) self.stop_node(4) self.log.info( "Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) self.log.info( "Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() # 1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.log.info("Check that we can survive a 288 block reorg still") self.reorg_test() # (1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info( "Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. 
# # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main() diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index 6085faa3e3..6e39990aa2 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -1,351 +1,349 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test descendant package tracking code.""" from decimal import Decimal from test_framework.messages import COIN from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, satoshi_round, - sync_blocks, - sync_mempools, ) # TODO: The activation code can be reverted after the new policy becomes # active. # Phonon dummy activation time ACTIVATION_TIME = 2000000000 # Replay protection time needs to be moved beyond phonon activation REPLAY_PROTECTION_TIME = ACTIVATION_TIME * 2 MAX_ANCESTORS = 50 MAX_DESCENDANTS = 50 class MempoolPackagesTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 common_params = ["-maxorphantx=1000", "-phononactivationtime={}".format(ACTIVATION_TIME), "-replayprotectionactivationtime={}".format(REPLAY_PROTECTION_TIME)] self.extra_args = [common_params, common_params + ["-limitancestorcount=5"]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() # Build a transaction that spends parent_txid:vout # Return amount sent def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransactionwithwallet(rawtx) txid = node.sendrawtransaction(signedtx['hex']) fulltx = node.getrawtransaction(txid, 1) # make sure we didn't generate a change output assert len(fulltx['vout']) == num_outputs return (txid, send_value) def run_test(self): [n.setmocktime(ACTIVATION_TIME) for n in self.nodes] # Mine some blocks and have them mature. 
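# (Coinbase outputs only become spendable after 100 confirmations, so # generating 101 blocks leaves exactly one mature coinbase for the chains # of transactions built below.)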
self.nodes[0].generate(101) utxo = self.nodes[0].listunspent(10) txid = utxo[0]['txid'] vout = utxo[0]['vout'] value = utxo[0]['amount'] fee = Decimal("0.0001") # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] for i in range(MAX_ANCESTORS): (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, 0, value, fee, 1) value = sent_value chain.append(txid) # Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor # count and fees should look correct mempool = self.nodes[0].getrawmempool(True) assert_equal(len(mempool), MAX_ANCESTORS) descendant_count = 1 descendant_fees = 0 descendant_size = 0 ancestor_size = sum([mempool[tx]['size'] for tx in mempool]) ancestor_count = MAX_ANCESTORS ancestor_fees = sum([mempool[tx]['fee'] for tx in mempool]) descendants = [] ancestors = list(chain) for x in reversed(chain): # Check that getmempoolentry is consistent with getrawmempool entry = self.nodes[0].getmempoolentry(x) assert_equal(entry, mempool[x]) # Check that the descendant calculations are correct assert_equal(mempool[x]['descendantcount'], descendant_count) descendant_fees += mempool[x]['fee'] assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']) assert_equal(mempool[x]['fees']['base'], mempool[x]['fee']) assert_equal(mempool[x]['fees']['modified'], mempool[x]['modifiedfee']) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN) assert_equal(mempool[x]['fees']['descendant'], descendant_fees) descendant_size += mempool[x]['size'] assert_equal(mempool[x]['descendantsize'], descendant_size) descendant_count += 1 # Check that ancestor calculations are correct assert_equal(mempool[x]['ancestorcount'], ancestor_count) assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN) assert_equal(mempool[x]['ancestorsize'], ancestor_size) ancestor_size -= mempool[x]['size'] ancestor_fees -= mempool[x]['fee'] ancestor_count -= 1 # Check that parent/child list is correct assert_equal(mempool[x]['spentby'], descendants[-1:]) assert_equal(mempool[x]['depends'], ancestors[-2:-1]) # Check that getmempooldescendants is correct assert_equal(sorted(descendants), sorted( self.nodes[0].getmempooldescendants(x))) # Check getmempooldescendants verbose output is correct for descendant, dinfo in self.nodes[0].getmempooldescendants( x, True).items(): assert_equal(dinfo['depends'], [ chain[chain.index(descendant) - 1]]) if dinfo['descendantcount'] > 1: assert_equal(dinfo['spentby'], [ chain[chain.index(descendant) + 1]]) else: assert_equal(dinfo['spentby'], []) descendants.append(x) # Check that getmempoolancestors is correct ancestors.remove(x) assert_equal(sorted(ancestors), sorted( self.nodes[0].getmempoolancestors(x))) # Check that getmempoolancestors verbose output is correct for ancestor, ainfo in self.nodes[0].getmempoolancestors( x, True).items(): assert_equal(ainfo['spentby'], [ chain[chain.index(ancestor) + 1]]) if ainfo['ancestorcount'] > 1: assert_equal(ainfo['depends'], [ chain[chain.index(ancestor) - 1]]) else: assert_equal(ainfo['depends'], []) # Check that getmempoolancestors/getmempooldescendants correctly handle # verbose=true v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True) assert_equal(len(v_ancestors), len(chain) - 1) for x in v_ancestors.keys(): assert_equal(mempool[x], v_ancestors[x]) assert chain[-1] not in v_ancestors.keys() v_descendants = self.nodes[0].getmempooldescendants(chain[0], True) assert_equal(len(v_descendants), len(chain) - 1) for x in v_descendants.keys(): assert_equal(mempool[x], 
v_descendants[x]) assert chain[0] not in v_descendants.keys() # Check that ancestor modified fees include fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000) mempool = self.nodes[0].getrawmempool(True) ancestor_fees = 0 for x in chain: ancestor_fees += mempool[x]['fee'] assert_equal(mempool[x]['fees']['ancestor'], ancestor_fees + Decimal('0.00001')) assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000) # Undo the prioritisetransaction for later tests self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000) # Check that descendant modified fees include fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000) mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): descendant_fees += mempool[x]['fee'] assert_equal(mempool[x]['fees']['descendant'], descendant_fees + Decimal('0.00001')) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000) # Adding one more transaction on to the chain should fail. assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1) # Check that prioritising a tx before it's added to the mempool works # First clear the mempool by mining a block. self.nodes[0].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() assert_equal(len(self.nodes[0].getrawmempool()), 0) # Prioritise a transaction that has been mined, then add it back to the # mempool by using invalidateblock. self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Keep node1's tip synced with node0 self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) # Now check that the transaction is in the mempool, with the right # modified fee mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): descendant_fees += mempool[x]['fee'] if (x == chain[-1]): assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'] + satoshi_round(0.00002)) assert_equal(mempool[x]['fees']['modified'], mempool[x]['fee'] + satoshi_round(0.00002)) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000) assert_equal(mempool[x]['fees']['descendant'], descendant_fees + satoshi_round(0.00002)) # TODO: check that node1's mempool is as expected # TODO: test ancestor size limits # Now test descendant chain limits txid = utxo[1]['txid'] value = utxo[1]['amount'] vout = utxo[1]['vout'] transaction_package = [] tx_children = [] # First create one parent tx with 10 children (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, vout, value, fee, 10) parent_transaction = txid for i in range(10): transaction_package.append( {'txid': txid, 'vout': i, 'amount': sent_value}) # Sign and send up to MAX_DESCENDANTS transactions chained off the # parent tx for i in range(MAX_DESCENDANTS - 1): utxo = transaction_package.pop(0) (txid, sent_value) = self.chain_transaction( self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) if utxo['txid'] == parent_transaction: tx_children.append(txid) for j in range(10): transaction_package.append( {'txid': txid, 'vout': j, 'amount': sent_value}) mempool = self.nodes[0].getrawmempool(True) assert_equal(mempool[parent_transaction] ['descendantcount'], MAX_DESCENDANTS) assert_equal(sorted(mempool[parent_transaction] ['spentby']), sorted(tx_children)) for child in tx_children: assert_equal(mempool[child]['depends'], 
[parent_transaction]) # Sending one more chained transaction will fail utxo = transaction_package.pop(0) assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) # TODO: check that node1's mempool is as expected # TODO: test descendant size limits # Test reorg handling # First, the basics: self.nodes[0].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash()) self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash()) # Now test the case where node1 has a transaction T in its mempool that # depends on transactions A and B which are in a mined block, and the # block containing A and B is disconnected, AND B is not accepted back # into node1's mempool because its ancestor count is too high. # Create 8 transactions, like so: # Tx0 -> Tx1 (vout0) # \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7 # # Mine them in the next block, then generate a new tx8 that spends # Tx1 and Tx7, and add to node1's mempool, then disconnect the # last block. # Create tx0 with 2 outputs utxo = self.nodes[0].listunspent() txid = utxo[0]['txid'] value = utxo[0]['amount'] vout = utxo[0]['vout'] send_value = satoshi_round((value - fee) / 2) inputs = [{'txid': txid, 'vout': vout}] outputs = {} for i in range(2): outputs[self.nodes[0].getnewaddress()] = send_value rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) txid = self.nodes[0].sendrawtransaction(signedtx['hex']) tx0_id = txid value = send_value # Create tx1 tx1_id, _ = self.chain_transaction( self.nodes[0], tx0_id, 0, value, fee, 1) # Create tx2-7 vout = 1 txid = tx0_id for i in range(6): (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, vout, value, fee, 1) vout = 0 value = sent_value # Mine these in a block self.nodes[0].generate(1) self.sync_all() # Now generate tx8, with a big fee inputs = [{'txid': tx1_id, 'vout': 0}, {'txid': txid, 'vout': 0}] outputs = {self.nodes[0].getnewaddress(): send_value + value - 4 * fee} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) txid = self.nodes[0].sendrawtransaction(signedtx['hex']) - sync_mempools(self.nodes) + self.sync_mempools() # Now try to disconnect the tip on each node... self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - sync_blocks(self.nodes) + self.sync_blocks() if __name__ == '__main__': MempoolPackagesTest().main() diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index 1f042d4f74..7421a1e276 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -1,934 +1,937 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test compact blocks (BIP 152). 
Only testing Version 1 compact blocks (txids) """ import random from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, HeaderAndShortIDs, msg_block, msg_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ToHex, ) from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.script import CScript, OP_TRUE from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx -from test_framework.util import assert_equal, sync_blocks, wait_until +from test_framework.util import ( + assert_equal, + wait_until, +) # TestP2PConn: A peer we use to send messages to bitcoind, and store responses. class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.last_sendcmpct = [] self.block_announced = False # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. self.announced_blockhashes = set() def on_sendcmpct(self, message): self.last_sendcmpct.append(message) def on_cmpctblock(self, message): self.block_announced = True self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() self.announced_blockhashes.add( self.last_message["cmpctblock"].header_and_shortids.header.sha256) def on_headers(self, message): self.block_announced = True for x in self.last_message["headers"].headers: x.calc_sha256() self.announced_blockhashes.add(x.sha256) def on_inv(self, message): for x in self.last_message["inv"].inv: if x.type == 2: self.block_announced = True self.announced_blockhashes.add(x.hash) # Requires caller to hold mininode_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): return (block_hash in self.announced_blockhashes) wait_until(received_hash, timeout=timeout, lock=mininode_lock) def send_await_disconnect(self, message, timeout=30): """Sends a message to the node and wait for disconnect. 
This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock) class CompactBlocksTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-acceptnonstdtxn=1"], ["-txindex", "-acceptnonstdtxn=1"]] self.utxos = [] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def build_block_on_tip(self, node): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] block = create_block( int(tip, 16), create_coinbase(height + 1), mtp + 1) block.nVersion = 4 block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0. block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256 self.nodes[0].generate(100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) for i in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): return (len(test_node.last_sendcmpct) > 0) wait_until(received_sendcmpct, timeout=30, lock=mininode_lock) with mininode_lock: # Check that the first version received is the preferred one assert_equal( test_node.last_sendcmpct[0].version, preferred_version) # And that we receive versions down to 1. assert_equal(test_node.last_sendcmpct[-1].version, 1) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert peer.block_announced with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) # We shouldn't get any block announcements via cmpctblock yet. check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Try one more time, this time after requesting headers. 
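# Note: the node only announces via cmpctblock to a peer once it believes # that peer already has the parent block's header, which is why each of # the sub-tests below re-syncs the headers chain first.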
test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() sendcmpct.version = 999 # was: preferred_version+1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True sendcmpct.version = preferred_version sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after sending a version-1, announce=false message. sendcmpct.version = preferred_version - 1 sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. sendcmpct.version = 1 # preferred_version-1 sendcmpct.announce = True old_node.send_and_ping(sendcmpct) # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, old_node, lambda p: "cmpctblock" in p.last_message) # This test actually causes bitcoind to (reasonably!) disconnect us, so do # this last. def test_invalid_cmpctblock_message(self): self.nodes[0].generate(101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) assert_equal( int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # bitcoind's choice of nonce. def test_compactblock_construction(self, node, test_node): # Generate a bunch of transactions. 
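# (We generate 101 blocks first so the wallet has a mature coinbase to # fund the sendtoaddress calls that follow.)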
node.generate(101) num_transactions = 25 address = node.getnewaddress() for i in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) test_node.wait_for_block_announcement(tip) # Make sure we will receive a fast-announce compact block self.request_cb_announcements(test_node, node) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. block = FromHex(CBlock(), node.getblock( "{:064x}".format(block_hash), False)) for tx in block.vtx: tx.calc_sha256() block.rehash() # Wait until the block was announced (via compact blocks) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) # Now fetch the compact block using a normal non-announce getdata with mininode_lock: test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" test_node.send_message(msg_getdata([inv])) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) def check_compactblock_construction_from_block( self, header_and_shortids, block_hash, block): # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert len(header_and_shortids.prefilled_txn) >= 1 assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. 
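# Per BIP 152, k0 and k1 are the first two little-endian 64-bit words of # SHA-256(serialized header || 8-byte nonce), and each shortid is # SipHash-2-4(k0, k1, txid) with its two most significant bytes dropped, # i.e. roughly shortid = siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff, # which is the scheme calculate_shortid() is expected to follow.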
[k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): if (len(header_and_shortids.prefilled_txn) > 0 and header_and_shortids.prefilled_txn[0].index == index): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that bitcoind requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed. def test_compactblock_requests(self, node, test_node, version): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node) with mininode_lock: test_node.last_message.pop("getdata", None) if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert_equal(test_node.last_message["getdata"].inv[0].type, 4) assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 comp_block.shortids = [ calculate_shortid(k0, k1, coinbase_hash)] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: assert "getblocktxn" in test_node.last_message absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. # Note that num_transactions is number of transactions not including the # coinbase. def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) pad_tx(tx) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) ordered_txs = block.vtx block.vtx = [block.vtx[0]] + \ sorted(block.vtx[1:], key=lambda tx: tx.get_id()) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block, ordered_txs # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. 
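# Note: BIP 152 sends getblocktxn indexes differentially encoded (each # index after the first is the gap to its predecessor minus one), so # absolute indexes [1, 3, 4] travel on the wire as [1, 1, 0]; # to_absolute() undoes this encoding.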
def test_getblocktxn_requests(self, node, test_node, version): def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert "getblocktxn" in peer.last_message absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions comp_block.initialize_from_block( block, prefill_list=[0, 1, 5]) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(ordered_txs[1])) assert ordered_txs[1].hash in node.getrawmempool() test_node.send_and_ping(msg_tx(ordered_txs[1])) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. prefill_list = [0, 1, 2, 3, 4, 5] prefill_list.remove(block.vtx.index(ordered_txs[1])) expected_index = block.vtx.index(ordered_txs[-1]) prefill_list.remove(expected_index) comp_block.initialize_from_block(block, prefill_list=prefill_list) test_getblocktxn_response(comp_block, test_node, [expected_index]) msg_bt.block_transactions = BlockTransactions( block.sha256, [ordered_txs[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) for tx in ordered_txs[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert tx.hash in mempool # Clear out last request. with mininode_lock: test_node.last_message.pop("getblocktxn", None) # Send compact block comp_block.initialize_from_block(block, prefill_list=[0]) test_tip_after_message( node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert "getblocktxn" not in test_node.last_message # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. 
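# Instead the node should fall back to fetching the full block via # getdata and accept it once delivered, which is what this test # exercises.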
def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in ordered_txs[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in ordered_txs[1:6]: assert tx.hash in mempool # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0]) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indices = [] with mininode_lock: assert "getblocktxn" in test_node.last_message absolute_indices = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) expected_indices = [] for i in [6, 7, 8, 9, 10]: expected_indices.append(block.vtx.index(ordered_txs[i])) assert_equal(absolute_indices, sorted(expected_indices)) # Now give an incorrect response. # Note that it's possible for bitcoind to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change was made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() msg.block_transactions = BlockTransactions( block.sha256, [ordered_txs[5]] + ordered_txs[7:]) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert test_node.last_message["getdata"].inv[0].type == 2 assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Deliver the block test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # bitcoind will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. 
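# For deeper blocks the node responds with a full block instead (when it # still has it); that fallback is checked at the end of this test.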
MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute( sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int( block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop( 0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), [0]) with mininode_lock: test_node.last_message.pop("block", None) test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(block_hash, 16)) assert "blocktxn" not in test_node.last_message def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() node.generate(1) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() with mininode_lock: test_node.last_message.pop("block", None) test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height - 5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert found # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). 
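# Answering would reveal which stale, never-connected forks this node # still has block data for, so the node stays silent and we assert below # that no blocktxn reply arrives.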
msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: assert "blocktxn" not in test_node.last_message def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 10) [listener.clear_block_announcement() for listener in listeners] node.submitblock(ToHex(block)) for listener in listeners: wait_until(lambda: listener.received_block_announcement(), timeout=30, lock=mininode_lock) with mininode_lock: for listener in listeners: assert "cmpctblock" in listener.last_message listener.last_message["cmpctblock"].header_and_shortids.header.calc_sha256( ) assert_equal( listener.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. def test_invalid_tx_in_compactblock(self, node, test_node): assert len(self.utxos) utxo = self.utxos[0] block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) block.vtx.remove(ordered_txs[3]) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4]) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert int(node.getbestblockhash(), 16) != block.sha256 test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version=1): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) msg = msg_sendcmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) def test_compactblock_reconstruction_multiple_peers( self, node, stalling_peer, delivery_peer): assert len(self.utxos) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert "getblocktxn" in peer.last_message return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert tx.hash in mempool delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() # TODO: modify txhash in a way that doesn't impact txid. 
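# (On this chain there is no segwit witness data, so txhash and txid # coincide; until the TODO above is done, the resend below simply # reconstructs the same valid block.)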
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) # Because txhash isn't modified, we end up reconstructing the same block # assert int(node.getbestblockhash(), 16) != block.sha256 msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): # Setup the p2p connections self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn()) self.ex_softfork_node = self.nodes[1].add_p2p_connection( TestP2PConn(), services=NODE_NETWORK) self.old_node = self.nodes[1].add_p2p_connection( TestP2PConn(), services=NODE_NETWORK) # We will need UTXOs to construct transactions in later tests. self.make_utxos() self.log.info("Running tests:") self.log.info("\tTesting SENDCMPCT p2p message... ") self.test_sendcmpct(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) + self.sync_blocks() self.test_sendcmpct( self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node) - sync_blocks(self.nodes) + self.sync_blocks() self.test_compactblock_construction( self.nodes[1], self.ex_softfork_node) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info("\tTesting compactblock requests... ") self.test_compactblock_requests(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) + self.sync_blocks() self.test_compactblock_requests( self.nodes[1], self.ex_softfork_node, 2) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info("\tTesting getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) + self.sync_blocks() self.test_getblocktxn_requests(self.nodes[1], self.ex_softfork_node, 2) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info("\tTesting getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) + self.sync_blocks() self.test_getblocktxn_handler(self.nodes[1], self.ex_softfork_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info( "\tTesting compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) - sync_blocks(self.nodes) + self.sync_blocks() self.test_compactblocks_not_at_tip( self.nodes[1], self.ex_softfork_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info("\tTesting handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) + self.sync_blocks() self.test_incorrect_blocktxn_response( self.nodes[1], self.ex_softfork_node, 2) - sync_blocks(self.nodes) + self.sync_blocks() # End-to-end block relay tests self.log.info("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0]) self.request_cb_announcements(self.old_node, self.nodes[1]) self.request_cb_announcements( self.ex_softfork_node, self.nodes[1], version=2) self.test_end_to_end_block_relay( self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay( self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node]) self.log.info("\tTesting handling of invalid 
compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) self.test_invalid_tx_in_compactblock( self.nodes[1], self.ex_softfork_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) self.log.info( "\tTesting reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers( self.nodes[1], self.ex_softfork_node, self.old_node) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() if __name__ == '__main__': CompactBlocksTest().main() diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index 25bb79dfb4..be280322d0 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -1,108 +1,107 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of feefilter messages.""" from decimal import Decimal import time from test_framework.messages import msg_feefilter from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import sync_blocks, sync_mempools def hashToHex(hash): return format(hash, '064x') # Wait up to 60 secs to see if the testnode has received all the expected invs def allInvsMatch(invsExpected, testnode): for x in range(60): with mininode_lock: if (sorted(invsExpected) == sorted(testnode.txinvs)): return True time.sleep(1) return False class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.txinvs = [] def on_inv(self, message): for i in message.inv: if (i.type == 1): self.txinvs.append(hashToHex(i.hash)) def clear_invs(self): with mininode_lock: self.txinvs = [] class FeeFilterTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): node1 = self.nodes[1] node0 = self.nodes[0] # Get out of IBD node1.generate(1) - sync_blocks(self.nodes) + self.sync_blocks() self.nodes[0].add_p2p_connection(TestP2PConn()) # Test that invs are received for all txs at feerate of 20 sat/byte node1.settxfee(Decimal("0.00020000")) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() # Set a filter of 15 sat/byte self.nodes[0].p2p.send_and_ping(msg_feefilter(15000)) # Test that txs are still being received (paying 20 sat/byte) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() # Change tx fee rate to 10 sat/byte and test they are no longer # received node1.settxfee(Decimal("0.00010000")) [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - sync_mempools(self.nodes) # must be sure node 0 has received all txs + self.sync_mempools() # must be sure node 0 has received all txs # Send one transaction from node0 that should be received, so that we # we can sync the test on receipt (if node1's txs were relayed, they'd # be received by the time this node0 tx is received). 
This is # unfortunately reliant on the current relay behavior where we batch up # to 35 entries in an inv, which means that when this next transaction # is eligible for relay, the prior transactions from node1 are eligible # as well. node0.settxfee(Decimal("0.00020000")) txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() # Remove fee filter and check that txs are received again self.nodes[0].p2p.send_and_ping(msg_feefilter(0)) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert allInvsMatch(txids, self.nodes[0].p2p) self.nodes[0].p2p.clear_invs() if __name__ == '__main__': FeeFilterTest().main() diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py index b2e72187c0..1911c6676a 100755 --- a/test/functional/p2p_node_network_limited.py +++ b/test/functional/p2p_node_network_limited.py @@ -1,148 +1,147 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests NODE_NETWORK_LIMITED. Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly and that it responds to getdata requests for blocks correctly: - send a block within 288 + 2 of the tip - disconnect peers who request blocks older than that.""" from test_framework.messages import ( CInv, msg_getdata, msg_verack, NODE_BITCOIN_CASH, NODE_BLOOM, NODE_NETWORK_LIMITED, ) from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, disconnect_nodes, - sync_blocks, wait_until, ) class P2PIgnoreInv(P2PInterface): firstAddrnServices = 0 def on_inv(self, message): # The node will send us invs for other blocks. Ignore them. 
pass def on_addr(self, message): self.firstAddrnServices = message.addrs[0].nServices def wait_for_addr(self, timeout=5): def test_function(): return self.last_message.get("addr") wait_until(test_function, timeout=timeout, lock=mininode_lock) def send_getdata_for_block(self, blockhash): getdata_request = msg_getdata() getdata_request.inv.append(CInv(2, int(blockhash, 16))) self.send_message(getdata_request) class NodeNetworkLimitedTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [['-prune=550', '-addrmantest'], [], []] def disconnect_all(self): disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[1], self.nodes[0]) disconnect_nodes(self.nodes[2], self.nodes[1]) disconnect_nodes(self.nodes[2], self.nodes[0]) disconnect_nodes(self.nodes[0], self.nodes[2]) disconnect_nodes(self.nodes[1], self.nodes[2]) def setup_network(self): self.add_nodes(self.num_nodes, self.extra_args) self.start_nodes() def run_test(self): node = self.nodes[0].add_p2p_connection(P2PIgnoreInv()) expected_services = NODE_BLOOM | NODE_BITCOIN_CASH | NODE_NETWORK_LIMITED self.log.info("Check that node has signalled expected services.") assert_equal(node.nServices, expected_services) self.log.info("Check that the localservices is as expected.") assert_equal(int(self.nodes[0].getnetworkinfo()[ 'localservices'], 16), expected_services) self.log.info( "Mine enough blocks to reach the NODE_NETWORK_LIMITED range.") connect_nodes_bi(self.nodes[0], self.nodes[1]) blocks = self.nodes[1].generatetoaddress( 292, self.nodes[1].get_deterministic_priv_key().address) - sync_blocks([self.nodes[0], self.nodes[1]]) + self.sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Make sure the deepest block we can retrieve is at tip-288.") # last block in valid range node.send_getdata_for_block(blocks[1]) node.wait_for_block(int(blocks[1], 16), timeout=3) self.log.info( "Requesting block at height 2 (tip-289) must fail (ignored).") # first block outside of the 288+2 limit node.send_getdata_for_block(blocks[0]) node.wait_for_disconnect(5) self.log.info("Check local address relay, do a fresh connection.") self.nodes[0].disconnect_p2ps() node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv()) node1.send_message(msg_verack()) node1.wait_for_addr() # must relay address with NODE_NETWORK_LIMITED assert_equal(node1.firstAddrnServices, expected_services) self.nodes[0].disconnect_p2ps() node1.wait_for_disconnect() # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, # sync must not be possible connect_nodes_bi(self.nodes[0], self.nodes[2]) try: - sync_blocks([self.nodes[0], self.nodes[2]], timeout=5) + self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5) except Exception: pass # node2 must remain at height 0 assert_equal(self.nodes[2].getblockheader( self.nodes[2].getbestblockhash())['height'], 0) # now connect also to node 1 (non pruned) connect_nodes_bi(self.nodes[1], self.nodes[2]) # sync must be possible - sync_blocks(self.nodes) + self.sync_blocks() # disconnect all peers self.disconnect_all() # mine 10 blocks on node 0 (pruned node) self.nodes[0].generatetoaddress( 10, self.nodes[0].get_deterministic_priv_key().address) # connect node1 (non pruned) with node0 (pruned) and check if they can # sync connect_nodes_bi(self.nodes[0], self.nodes[1]) # sync must be possible, node 1 is no longer in IBD and should # therefore connect to node 0 (NODE_NETWORK_LIMITED) - 
sync_blocks([self.nodes[0], self.nodes[1]]) + self.sync_blocks([self.nodes[0], self.nodes[1]]) if __name__ == '__main__': NodeNetworkLimitedTest().main() diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py index ab9232d228..c880a0f3de 100755 --- a/test/functional/p2p_sendheaders.py +++ b/test/functional/p2p_sendheaders.py @@ -1,646 +1,649 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test behavior of headers messages to announce blocks. Setup: - Two nodes: - node0 is the node-under-test. We create two p2p connections to it. The first p2p connection is a control and should only ever receive inv's. The second p2p connection tests the headers sending logic. - node1 is used to create reorgs. test_null_locators ================== Sends two getheaders requests with null locator values. The first request's hashstop value refers to a validated block, while the second request's hashstop value refers to a block which hasn't been validated. Verifies only the first request returns headers. test_nonnull_locators ===================== Part 1: No headers announcements before "sendheaders" a. node mines a block [expect: inv] send getdata for the block [expect: block] b. node mines another block [expect: inv] send getheaders and getdata [expect: headers, then block] c. node mines another block [expect: inv] peer mines a block, announces with header [expect: getdata] d. node mines another block [expect: inv] Part 2: After "sendheaders", headers announcements should generally work. a. peer sends sendheaders [expect: no response] peer sends getheaders with current tip [expect: no response] b. node mines a block [expect: tip header] c. for N in 1, ..., 10: * for announce-type in {inv, header} - peer mines N blocks, announces with announce-type [ expect: getheaders/getdata or getdata, deliver block(s) ] - node mines a block [ expect: 1 header ] Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. - For response-type in {inv, getheaders} * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] * node mines an 8-block reorg [ expect: inv at tip ] * peer responds with getblocks/getdata [expect: inv, blocks ] * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] * node mines another block at tip [ expect: inv ] * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] * peer requests block [ expect: block ] * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] * node mines 1 block [expect: 1 header, peer responds with getdata] Part 4: Test direct fetch behavior a. Announce 2 old block headers. Expect: no getdata requests. b. Announce 3 new blocks via 1 headers message. Expect: one getdata request for all 3 blocks. (Send blocks.) c. Announce 1 header that forks off the last two blocks. Expect: no response. d. Announce 1 more header that builds on that fork. Expect: one getdata request for two blocks. e. Announce 16 more headers that build on that fork. Expect: getdata request for 14 more blocks. f. Announce 1 more header that builds on that fork. Expect: no response. Part 5: Test handling of headers that don't connect. a. Repeat 10 times: 1. 
Announce a header that doesn't connect. Expect: getheaders message 2. Send headers chain. Expect: getdata for the missing blocks, tip update. b. Then send 9 more headers that don't connect. Expect: getheaders message each time. c. Announce a header that does connect. Expect: no response. d. Announce 49 headers that don't connect. Expect: getheaders message each time. e. Announce one more that doesn't connect. Expect: disconnect. """ from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( CBlockHeader, CInv, msg_block, msg_getblocks, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendheaders, ) from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, sync_blocks, wait_until +from test_framework.util import ( + assert_equal, + wait_until, +) DIRECT_FETCH_RESPONSE_TIME = 0.05 class BaseNode(P2PInterface): def __init__(self): super().__init__() self.block_announced = False self.last_blockhash_announced = None self.recent_headers_announced = [] def send_get_data(self, block_hashes): """Request data for a list of block hashes.""" msg = msg_getdata() for x in block_hashes: msg.inv.append(CInv(2, x)) self.send_message(msg) def send_get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.send_message(msg) def send_block_inv(self, blockhash): msg = msg_inv() msg.inv = [CInv(2, blockhash)] self.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def send_getblocks(self, locator): getblocks_message = msg_getblocks() getblocks_message.locator.vHave = locator self.send_message(getblocks_message) def wait_for_getdata(self, hash_list, timeout=60): if hash_list == []: return def test_function(): return "getdata" in self.last_message and [ x.hash for x in self.last_message["getdata"].inv] == hash_list wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_block_announcement(self, block_hash, timeout=60): def test_function(): return self.last_blockhash_announced == block_hash wait_until(test_function, timeout=timeout, lock=mininode_lock) def on_inv(self, message): self.block_announced = True self.last_blockhash_announced = message.inv[-1].hash def on_headers(self, message): if len(message.headers): self.block_announced = True for x in message.headers: x.calc_sha256() # append because headers may be announced over multiple # messages. self.recent_headers_announced.append(x.sha256) self.last_blockhash_announced = message.headers[-1].sha256 def clear_block_announcements(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.recent_headers_announced = [] def check_last_headers_announcement(self, headers): """Test whether the last headers announcements received are right. 
Headers may be announced across more than one message.""" def test_function(): return (len(self.recent_headers_announced) >= len(headers)) wait_until(test_function, timeout=60, lock=mininode_lock) with mininode_lock: assert_equal(self.recent_headers_announced, headers) self.block_announced = False self.last_message.pop("headers", None) self.recent_headers_announced = [] def check_last_inv_announcement(self, inv): """Test whether the last announcement received had the right inv. inv should be a list of block hashes.""" def test_function(): return self.block_announced wait_until(test_function, timeout=60, lock=mininode_lock) with mininode_lock: compare_inv = [] if "inv" in self.last_message: compare_inv = [x.hash for x in self.last_message["inv"].inv] assert_equal(compare_inv, inv) self.block_announced = False self.last_message.pop("inv", None) class SendHeadersTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"]] def mine_blocks(self, count): """Mine count blocks and return the new tip.""" # Clear out block announcements from each p2p listener [x.clear_block_announcements() for x in self.nodes[0].p2ps] self.nodes[0].generatetoaddress( count, self.nodes[0].get_deterministic_priv_key().address) return int(self.nodes[0].getbestblockhash(), 16) def mine_reorg(self, length): """Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks). Note: we clear the state of our p2p connections after the to-be-reorged-out blocks are mined, so that we don't break later tests. return the list of block hashes newly mined.""" # make sure all invalidated blocks are node0's self.nodes[0].generatetoaddress( length, self.nodes[0].get_deterministic_priv_key().address) - sync_blocks(self.nodes, wait=0.1) + self.sync_blocks(self.nodes, wait=0.1) for x in self.nodes[0].p2ps: x.wait_for_block_announcement( int(self.nodes[0].getbestblockhash(), 16)) x.clear_block_announcements() tip_height = self.nodes[1].getblockcount() hash_to_invalidate = self.nodes[1].getblockhash( tip_height - (length - 1)) self.nodes[1].invalidateblock(hash_to_invalidate) # Must be longer than the orig chain all_hashes = self.nodes[1].generatetoaddress( length + 1, self.nodes[1].get_deterministic_priv_key().address) - sync_blocks(self.nodes, wait=0.1) + self.sync_blocks(self.nodes, wait=0.1) return [int(x, 16) for x in all_hashes] def run_test(self): # Setup the p2p connections inv_node = self.nodes[0].add_p2p_connection(BaseNode()) # Make sure NODE_NETWORK is not set for test_node, so no block download # will occur outside of direct fetching test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=0) # Ensure verack's have been processed by our peer inv_node.sync_with_ping() test_node.sync_with_ping() self.test_null_locators(test_node, inv_node) self.test_nonnull_locators(test_node, inv_node) def test_null_locators(self, test_node, inv_node): tip = self.nodes[0].getblockheader(self.nodes[0].generatetoaddress( 1, self.nodes[0].get_deterministic_priv_key().address)[0]) tip_hash = int(tip["hash"], 16) inv_node.check_last_inv_announcement(inv=[tip_hash]) test_node.check_last_inv_announcement(inv=[tip_hash]) self.log.info( "Verify getheaders with null locator and valid hashstop returns headers.") test_node.clear_block_announcements() test_node.send_get_headers(locator=[], hashstop=tip_hash) test_node.check_last_headers_announcement(headers=[tip_hash]) self.log.info( "Verify getheaders with null locator and 
invalid hashstop does not return headers.") block = create_block(int(tip["hash"], 16), create_coinbase( tip["height"] + 1), tip["mediantime"] + 1) block.solve() test_node.send_header_for_blocks([block]) test_node.clear_block_announcements() test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16)) test_node.sync_with_ping() assert_equal(test_node.block_announced, False) inv_node.clear_block_announcements() test_node.send_message(msg_block(block)) inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)]) def test_nonnull_locators(self, test_node, inv_node): tip = int(self.nodes[0].getbestblockhash(), 16) # PART 1 # 1. Mine a block; expect inv announcements each time self.log.info( "Part 1: headers don't start before sendheaders message...") for i in range(4): self.log.debug("Part 1.{}: starting...".format(i)) old_tip = tip tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_inv_announcement(inv=[tip]) # Try a few different responses; none should affect next # announcement if i == 0: # first request the block test_node.send_get_data([tip]) test_node.wait_for_block(tip) elif i == 1: # next try requesting header and block test_node.send_get_headers(locator=[old_tip], hashstop=tip) test_node.send_get_data([tip]) test_node.wait_for_block(tip) # since we requested headers... test_node.clear_block_announcements() elif i == 2: # this time announce own block via headers inv_node.clear_block_announcements() height = self.nodes[0].getblockcount() last_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] block_time = last_time + 1 new_block = create_block( tip, create_coinbase(height + 1), block_time) new_block.solve() test_node.send_header_for_blocks([new_block]) test_node.wait_for_getdata([new_block.sha256]) test_node.send_message(msg_block(new_block)) test_node.sync_with_ping() # make sure this block is processed wait_until(lambda: inv_node.block_announced, timeout=60, lock=mininode_lock) inv_node.clear_block_announcements() test_node.clear_block_announcements() self.log.info("Part 1: success!") self.log.info( "Part 2: announce blocks with headers after sendheaders message...") # PART 2 # 2. Send a sendheaders message and test that headers announcements # commence and keep working. test_node.send_message(msg_sendheaders()) prev_tip = int(self.nodes[0].getbestblockhash(), 16) test_node.send_get_headers(locator=[prev_tip], hashstop=0) test_node.sync_with_ping() # Now that we've synced headers, headers announcements should work tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=[tip]) height = self.nodes[0].getblockcount() + 1 block_time += 10 # Advance far enough ahead for i in range(10): self.log.debug("Part 2.{}: starting...".format(i)) # Mine i blocks, and alternate announcing either via # inv (of tip) or via headers. 
After each, new blocks # mined by the node should successfully be announced # with block header, even though the blocks are never requested for j in range(2): self.log.debug("Part 2.{}.{}: starting...".format(i, j)) blocks = [] for b in range(i + 1): blocks.append(create_block( tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 if j == 0: # Announce via inv test_node.send_block_inv(tip) test_node.wait_for_getheaders() # Should have received a getheaders now test_node.send_header_for_blocks(blocks) # Test that duplicate inv's won't result in duplicate # getdata requests, or duplicate headers announcements [inv_node.send_block_inv(x.sha256) for x in blocks] test_node.wait_for_getdata([x.sha256 for x in blocks]) inv_node.sync_with_ping() else: # Announce via headers test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) # Test that duplicate headers won't result in duplicate # getdata requests (the check is further down) inv_node.send_header_for_blocks(blocks) inv_node.sync_with_ping() [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() inv_node.sync_with_ping() # This block should not be announced to the inv node (since it also # broadcast it) assert "inv" not in inv_node.last_message assert "headers" not in inv_node.last_message tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=[tip]) height += 1 block_time += 1 self.log.info("Part 2: success!") self.log.info( "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") # PART 3. Headers announcements can stop after large reorg, and resume after # getheaders or inv from peer. 
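# (Aside, hedged, not part of the test: the node only announces new tips with
#  a headers message while the reorg is small enough that the announcement can
#  still connect for the peer; beyond that it falls back to a bare inv.
#  Roughly, assuming MAX_BLOCKS_TO_ANNOUNCE (8) from net_processing:
#
#      def announcement_type(num_new_blocks_since_peer_tip):
#          if num_new_blocks_since_peer_tip <= MAX_BLOCKS_TO_ANNOUNCE:
#              return "headers"
#          return "inv"
#
#  so the 7-block reorg below (8 new blocks) is still announced with headers,
#  while the 8-block reorg (9 new blocks) arrives as a single inv.)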
for j in range(2): self.log.debug("Part 3.{}: starting...".format(j)) # First try mining a reorg that can propagate with header # announcement new_block_hashes = self.mine_reorg(length=7) tip = new_block_hashes[-1] inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=new_block_hashes) block_time += 8 # Mine a too-large reorg, which should be announced with a single # inv new_block_hashes = self.mine_reorg(length=8) tip = new_block_hashes[-1] inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_inv_announcement(inv=[tip]) block_time += 9 fork_point = self.nodes[0].getblock("{:064x}".format( new_block_hashes[0]))["previousblockhash"] fork_point = int(fork_point, 16) # Use getblocks/getdata test_node.send_getblocks(locator=[fork_point]) test_node.check_last_inv_announcement(inv=new_block_hashes) test_node.send_get_data(new_block_hashes) test_node.wait_for_block(new_block_hashes[-1]) for i in range(3): self.log.debug("Part 3.{}.{}: starting...".format(j, i)) # Mine another block, still should get only an inv tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_inv_announcement(inv=[tip]) if i == 0: # Just get the data -- shouldn't cause headers # announcements to resume test_node.send_get_data([tip]) test_node.wait_for_block(tip) elif i == 1: # Send a getheaders message that shouldn't trigger headers announcements # to resume (best header sent will be too old) test_node.send_get_headers( locator=[fork_point], hashstop=new_block_hashes[1]) test_node.send_get_data([tip]) test_node.wait_for_block(tip) elif i == 2: # This time, try sending either a getheaders to trigger resumption # of headers announcements, or mine a new block and inv it, also # triggering resumption of headers announcements. test_node.send_get_data([tip]) test_node.wait_for_block(tip) if j == 0: test_node.send_get_headers(locator=[tip], hashstop=0) test_node.sync_with_ping() else: test_node.send_block_inv(tip) test_node.sync_with_ping() # New blocks should now be announced with header tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=[tip]) self.log.info("Part 3: success!") self.log.info("Part 4: Testing direct fetch behavior...") tip = self.mine_blocks(1) height = self.nodes[0].getblockcount() + 1 last_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] block_time = last_time + 1 # Create 2 blocks. Send the blocks, then send the headers. 
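# (Aside on Part 4's rule, hedged: headers that connect and carry at least as
#  much work as the current tip can be fetched directly, but requests to one
#  peer are capped; assuming MAX_BLOCKS_IN_TRANSIT_PER_PEER (16) from
#  net_processing:
#
#      if connects and fork_work >= tip_work:
#          to_request = announced[:16 - already_in_flight]
#
#  which is why announcing 16 fork headers later yields getdata for only 14
#  blocks: two blocks of that fork are already in flight at that point.)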
blocks = [] for b in range(2): blocks.append(create_block( tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 inv_node.send_message(msg_block(blocks[-1])) inv_node.sync_with_ping() # Make sure blocks are processed test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() # should not have received any getdata messages with mininode_lock: assert "getdata" not in test_node.last_message # This time, direct fetch should work blocks = [] for b in range(3): blocks.append(create_block( tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() test_node.wait_for_getdata( [x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() # Now announce a header that forks the last two blocks tip = blocks[0].sha256 height -= 1 blocks = [] # Create extra blocks for later for b in range(20): blocks.append(create_block( tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Announcing one block on fork should not trigger direct fetch # (less work than tip) test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[0:1]) test_node.sync_with_ping() with mininode_lock: assert "getdata" not in test_node.last_message # Announcing one more block on fork should trigger direct fetch for # both blocks (same work as tip) test_node.send_header_for_blocks(blocks[1:2]) test_node.sync_with_ping() test_node.wait_for_getdata( [x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME) # Announcing 16 more headers should trigger direct fetch for 14 more # blocks test_node.send_header_for_blocks(blocks[2:18]) test_node.sync_with_ping() test_node.wait_for_getdata( [x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME) # Announcing 1 more header should not trigger any response test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[18:19]) test_node.sync_with_ping() with mininode_lock: assert "getdata" not in test_node.last_message self.log.info("Part 4: success!") # Now deliver all those blocks we announced. [test_node.send_message(msg_block(x)) for x in blocks] self.log.info("Part 5: Testing handling of unconnecting headers") # First we test that receipt of an unconnecting header doesn't prevent # chain sync. for i in range(10): self.log.debug("Part 5.{}: starting...".format(i)) test_node.last_message.pop("getdata", None) blocks = [] # Create two more blocks. for j in range(2): blocks.append(create_block( tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Send the header of the second block -> this won't connect. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[1]]) test_node.wait_for_getheaders() test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() assert_equal( int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) blocks = [] # Now we test that if we repeatedly don't send connecting headers, we # don't go into an infinite loop trying to get them to connect. 
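# (Aside, hedged: the behaviour probed next is a per-peer counter in
#  net_processing: each non-connecting header increments it, a connecting
#  header resets it, and every MAX_UNCONNECTING_HEADERS misses the peer is
#  penalised until the default threshold (100) disconnects it. A rough model,
#  the 20-point penalty being an assumption about the implementation:
#
#      score = misses = 0
#      for connects in header_stream:
#          misses = 0 if connects else misses + 1
#          if misses and misses % 10 == 0:    # MAX_UNCONNECTING_HEADERS
#              score += 20
#              if score >= 100:
#                  disconnect()               # after 5 * 10 bad headers
#  )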
MAX_UNCONNECTING_HEADERS = 10 for j in range(MAX_UNCONNECTING_HEADERS + 1): blocks.append(create_block( tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 for i in range(1, MAX_UNCONNECTING_HEADERS): # Send a header that doesn't connect, check that we get a # getheaders. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i]]) test_node.wait_for_getheaders() # Next header will connect, should re-set our count: test_node.send_header_for_blocks([blocks[0]]) # Remove the first two entries (blocks[1] would connect): blocks = blocks[2:] # Now try to see how many unconnecting headers we can send # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS for i in range(5 * MAX_UNCONNECTING_HEADERS - 1): # Send a header that doesn't connect, check that we get a # getheaders. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i % len(blocks)]]) test_node.wait_for_getheaders() # Eventually this stops working. test_node.send_header_for_blocks([blocks[-1]]) # Should get disconnected test_node.wait_for_disconnect() self.log.info("Part 5: success!") # Finally, check that the inv node never received a getdata request, # throughout the test assert "getdata" not in inv_node.last_message if __name__ == '__main__': SendHeadersTest().main() diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py index 9f5d433caa..5a0b4c90ae 100755 --- a/test/functional/p2p_unrequested_blocks.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -1,359 +1,358 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of unrequested blocks. Setup: two nodes, node0+node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. We have one P2PInterface connection to node0 called test_node, and one to node1 called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance for node0, but node1 should skip processing due to nMinimumChainWork. Node1 is unused in tests 3-7: 3. Mine a block that forks from the genesis block, and deliver to test_node. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more or equal work to the tip. 4a,b. Send another two blocks that build on the forking block. Node0 should process the second block but be stuck on the shorter chain, because it's missing an intermediate block. 4c.Send 288 more blocks on the longer chain (the number of blocks ahead we currently store). Node0 should process all but the last block (too far ahead in height). 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. 8. 
Create a fork which is invalid at a height longer than the current chain (ie to which the node will try to reorg) but which has headers built on top of the invalid block. Check that we get disconnected if we send more headers on the chain the node now knows to be invalid. 9. Test Node1 is able to sync when connected to node0 (which should have sufficient work on its chain). """ import time from test_framework.blocktools import ( create_block, create_coinbase, create_tx_with_script, ) from test_framework.messages import ( CBlockHeader, CInv, msg_block, msg_headers, msg_inv, ) from test_framework.mininode import ( mininode_lock, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, - sync_blocks, ) class AcceptBlockTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-noparkdeepreorg"], ["-minimumchainwork=0x10"]] def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 (started with # -minimumchainwork) will be used to test the interaction with # nMinimumChainWork. self.setup_nodes() def run_test(self): # Setup the p2p connections # test_node connects to node0 (not whitelisted) test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # min_work_node connects to node1 (which has -minimumchainwork set) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) # 1. Have nodes mine a block (leave IBD) [n.generatetoaddress(1, n.get_deterministic_priv_key().address) for n in self.nodes] tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes] # 2. Send one block that builds on each tip. # This should be accepted by node0 blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append(create_block( tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) min_work_node.send_message(msg_block(blocks_h2[1])) for x in [test_node, min_work_node]: x.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 1) self.log.info( "First height 2 block accepted by node0; correctly rejected by node1") # 3. Send another block that builds on genesis. block_h1f = create_block( int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time) block_time += 1 block_h1f.solve() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h1f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash) # 4. Send another two blocks that build on the fork. block_h2f = create_block( block_h1f.sha256, create_coinbase(2), block_time) block_time += 1 block_h2f.solve() test_node.send_message(msg_block(block_h2f)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h2f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found # But this block should be accepted by node since it has equal work. 
self.nodes[0].getblock(block_h2f.hash) self.log.info("Second height 2 block accepted, but not reorg'ed to") # 4b. Now send another block that builds on the forking chain. block_h3 = create_block( block_h2f.sha256, create_coinbase(3), block_h2f.nTime + 1) block_h3.solve() test_node.send_message(msg_block(block_h3)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h3.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found self.nodes[0].getblock(block_h3.hash) # But this block should be accepted by node since it has more work. self.nodes[0].getblock(block_h3.hash) self.log.info("Unrequested more-work block accepted") # 4c. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node (as long as it is not missing any # headers) tip = block_h3 all_blocks = [] for i in range(288): next_block = create_block( tip.sha256, create_coinbase(i + 4), tip.nTime + 1) next_block.solve() all_blocks.append(next_block) tip = next_block # Now send the block at height 5 and check that it wasn't accepted # (missing header) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash) assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash) # The block at height 5 should be accepted if we provide the missing # header, though headers_message = msg_headers() headers_message.headers.append(CBlockHeader(all_blocks[0])) test_node.send_message(headers_message) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() self.nodes[0].getblock(all_blocks[1].hash) # Now send the blocks in all_blocks for i in range(288): test_node.send_message(msg_block(all_blocks[i])) test_node.sync_with_ping() # Blocks 1-287 should be accepted, block 288 should be ignored because # it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_rpc_error( -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). # The node should have requested the blocks at some point, so # disconnect/reconnect first self.nodes[0].disconnect_p2ps() self.nodes[1].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) self.log.info( "Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) test_node.send_message(msg_inv([CInv(2, block_h3.sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, block_h1f.sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. 
Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) self.nodes[0].getblock(all_blocks[286].hash) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) self.log.info( "Successfully reorged to longer chain from non-whitelisted peer") # 8. Create a chain which is invalid at a height longer than the # current chain, but which has more blocks on top of that block_289f = create_block( all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime + 1) block_289f.solve() block_290f = create_block( block_289f.sha256, create_coinbase(290), block_289f.nTime + 1) block_290f.solve() block_291 = create_block( block_290f.sha256, create_coinbase(291), block_290f.nTime + 1) # block_291 spends a coinbase below maturity! block_291.vtx.append(create_tx_with_script( block_290f.vtx[0], 0, script_sig=b"42", amount=1)) block_291.hashMerkleRoot = block_291.calc_merkle_root() block_291.solve() block_292 = create_block( block_291.sha256, create_coinbase(292), block_291.nTime + 1) block_292.solve() # Now send all the headers on the chain and enough blocks to trigger # reorg headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_289f)) headers_message.headers.append(CBlockHeader(block_290f)) headers_message.headers.append(CBlockHeader(block_291)) headers_message.headers.append(CBlockHeader(block_292)) test_node.send_message(headers_message) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_292.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert tip_entry_found assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) test_node.send_message(msg_block(block_289f)) test_node.send_message(msg_block(block_290f)) test_node.sync_with_ping() self.nodes[0].getblock(block_289f.hash) self.nodes[0].getblock(block_290f.hash) test_node.send_message(msg_block(block_291)) # At this point we've sent an obviously-bogus block, wait for full processing # without assuming whether we will be disconnected or not try: # Only wait a short while so the test doesn't take forever if we do get # disconnected test_node.sync_with_ping(timeout=1) except AssertionError: test_node.wait_for_disconnect() self.nodes[0].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # We should have failed reorg and switched back to 290 (but have block # 291) assert_equal(self.nodes[0].getblockcount(), 290) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_equal(self.nodes[0].getblock( block_291.hash)["confirmations"], -1) # Now send a new header on the invalid chain, indicating we're forked # off, and expect to get disconnected block_293 = create_block( block_292.sha256, create_coinbase(293), block_292.nTime + 1) block_293.solve() headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_293)) test_node.send_message(headers_message) test_node.wait_for_disconnect() # 9. 
Connect node1 to node0 and ensure it is able to sync connect_nodes(self.nodes[0], self.nodes[1]) - sync_blocks([self.nodes[0], self.nodes[1]]) + self.sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Successfully synced nodes 1 and 0") if __name__ == '__main__': AcceptBlockTest().main() diff --git a/test/functional/rpc_getchaintips.py b/test/functional/rpc_getchaintips.py index 0ed74ed10c..56c2167906 100755 --- a/test/functional/rpc_getchaintips.py +++ b/test/functional/rpc_getchaintips.py @@ -1,68 +1,68 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the getchaintips RPC. - introduce a network split - work on chains of different lengths - join the network together again - verify that getchaintips now returns two chain tips. """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class GetChainTipsTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def run_test(self): tips = self.nodes[0].getchaintips() assert_equal(len(tips), 1) assert_equal(tips[0]['branchlen'], 0) assert_equal(tips[0]['height'], 200) assert_equal(tips[0]['status'], 'active') # Split the network and build two chains of different lengths. self.split_network() self.nodes[0].generatetoaddress( 10, self.nodes[0].get_deterministic_priv_key().address) self.nodes[2].generatetoaddress( 20, self.nodes[2].get_deterministic_priv_key().address) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:]) tips = self.nodes[1].getchaintips() assert_equal(len(tips), 1) shortTip = tips[0] assert_equal(shortTip['branchlen'], 0) assert_equal(shortTip['height'], 210) assert_equal(tips[0]['status'], 'active') tips = self.nodes[3].getchaintips() assert_equal(len(tips), 1) longTip = tips[0] assert_equal(longTip['branchlen'], 0) assert_equal(longTip['height'], 220) assert_equal(tips[0]['status'], 'active') # Join the network halves and check that we now have two tips # (at least at the nodes that previously had the short chain). self.join_network() tips = self.nodes[0].getchaintips() assert_equal(len(tips), 2) assert_equal(tips[0], longTip) assert_equal(tips[1]['branchlen'], 10) assert_equal(tips[1]['status'], 'valid-fork') tips[1]['branchlen'] = 0 tips[1]['status'] = 'active' assert_equal(tips[1], shortTip) if __name__ == '__main__': GetChainTipsTest().main() diff --git a/test/functional/rpc_invalidateblock.py b/test/functional/rpc_invalidateblock.py index 15cea70548..b7ebfe14e0 100755 --- a/test/functional/rpc_invalidateblock.py +++ b/test/functional/rpc_invalidateblock.py @@ -1,76 +1,79 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the invalidateblock RPC.""" import time from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, connect_nodes_bi, sync_blocks +from test_framework.util import ( + assert_equal, + connect_nodes_bi, +) class InvalidateTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [["-noparkdeepreorg"], [], []] def setup_network(self): self.setup_nodes() def run_test(self): self.log.info( "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:") self.log.info("Mine 4 blocks on Node 0") self.nodes[0].generatetoaddress( 4, self.nodes[0].get_deterministic_priv_key().address) assert self.nodes[0].getblockcount() == 4 besthash = self.nodes[0].getbestblockhash() self.log.info("Mine competing 6 blocks on Node 1") self.nodes[1].generatetoaddress( 6, self.nodes[1].get_deterministic_priv_key().address) assert self.nodes[1].getblockcount() == 6 self.log.info("Connect nodes to force a reorg") connect_nodes_bi(self.nodes[0], self.nodes[1]) - sync_blocks(self.nodes[0:2]) + self.sync_blocks(self.nodes[0:2]) assert self.nodes[0].getblockcount() == 6 badhash = self.nodes[1].getblockhash(2) self.log.info( "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain") self.nodes[0].invalidateblock(badhash) newheight = self.nodes[0].getblockcount() newhash = self.nodes[0].getbestblockhash() if (newheight != 4 or newhash != besthash): raise AssertionError( "Wrong tip for node0, hash {}, height {}".format(newhash, newheight)) self.log.info("\nMake sure we won't reorg to a lower work chain:") connect_nodes_bi(self.nodes[1], self.nodes[2]) self.log.info("Sync node 2 to node 1 so both have 6 blocks") - sync_blocks(self.nodes[1:3]) + self.sync_blocks(self.nodes[1:3]) assert self.nodes[2].getblockcount() == 6 self.log.info("Invalidate block 5 on node 1 so its tip is now at 4") self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5)) assert self.nodes[1].getblockcount() == 4 self.log.info("Invalidate block 3 on node 2, so its tip is now 2") self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3)) assert self.nodes[2].getblockcount() == 2 self.log.info("..and then mine a block") self.nodes[2].generatetoaddress( 1, self.nodes[2].get_deterministic_priv_key().address) self.log.info("Verify all nodes are at the right height") time.sleep(5) assert_equal(self.nodes[2].getblockcount(), 3) assert_equal(self.nodes[0].getblockcount(), 4) node1height = self.nodes[1].getblockcount() if node1height < 4: raise AssertionError( "Node 1 reorged to a lower height: {}".format(node1height)) if __name__ == '__main__': InvalidateTest().main() diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py index 62d70b0aae..adc0269d6a 100755 --- a/test/functional/rpc_preciousblock.py +++ b/test/functional/rpc_preciousblock.py @@ -1,128 +1,127 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the preciousblock RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, - sync_blocks, ) def unidirectional_node_sync_via_rpc(node_src, node_dest): blocks_to_copy = [] blockhash = node_src.getbestblockhash() while True: try: assert len(node_dest.getblock(blockhash, False)) > 0 break except Exception: blocks_to_copy.append(blockhash) blockhash = node_src.getblockheader( blockhash, True)['previousblockhash'] blocks_to_copy.reverse() for blockhash in blocks_to_copy: blockdata = node_src.getblock(blockhash, False) assert node_dest.submitblock(blockdata) in (None, 'inconclusive') def node_sync_via_rpc(nodes): for node_src in nodes: for node_dest in nodes: if node_src is node_dest: continue unidirectional_node_sync_via_rpc(node_src, node_dest) class PreciousTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], ["-noparkdeepreorg"]] def setup_network(self): self.setup_nodes() def run_test(self): self.log.info( "Ensure submitblock can in principle reorg to a competing chain") # A non-wallet address to mine to def gen_address( i): return self.nodes[i].get_deterministic_priv_key().address self.nodes[0].generatetoaddress(1, gen_address(0)) assert_equal(self.nodes[0].getblockcount(), 1) hashZ = self.nodes[1].generatetoaddress(2, gen_address(1))[-1] assert_equal(self.nodes[1].getblockcount(), 2) node_sync_via_rpc(self.nodes[0:3]) assert_equal(self.nodes[0].getbestblockhash(), hashZ) self.log.info("Mine blocks A-B-C on Node 0") hashC = self.nodes[0].generatetoaddress(3, gen_address(0))[-1] assert_equal(self.nodes[0].getblockcount(), 5) self.log.info("Mine competing blocks E-F-G on Node 1") hashG = self.nodes[1].generatetoaddress(3, gen_address(1))[-1] assert_equal(self.nodes[1].getblockcount(), 5) assert hashC != hashG self.log.info("Connect nodes and check no reorg occurs") # Submit competing blocks via RPC so any reorg should occur before we # proceed (no way to wait on inaction for p2p sync) node_sync_via_rpc(self.nodes[0:2]) connect_nodes_bi(self.nodes[0], self.nodes[1]) assert_equal(self.nodes[0].getbestblockhash(), hashC) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block C again") self.nodes[0].preciousblock(hashC) assert_equal(self.nodes[0].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block C") self.nodes[1].preciousblock(hashC) # wait because node 1 may not have downloaded hashC - sync_blocks(self.nodes[0:2]) + self.sync_blocks(self.nodes[0:2]) assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block G again") self.nodes[1].preciousblock(hashG) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G again") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node1 prefer block C again") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info( "Mine another block (E-F-G-)H on Node 0 and reorg Node 1") self.nodes[0].generatetoaddress(1, gen_address(0)) assert_equal(self.nodes[0].getblockcount(), 6) - sync_blocks(self.nodes[0:2]) + self.sync_blocks(self.nodes[0:2]) hashH = self.nodes[0].getbestblockhash() 
assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Node1 should not be able to prefer block C anymore") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Mine competing blocks I-J-K-L on Node 2") self.nodes[2].generatetoaddress(4, gen_address(2)) assert_equal(self.nodes[2].getblockcount(), 6) hashL = self.nodes[2].getbestblockhash() self.log.info("Connect nodes and check no reorg occurs") node_sync_via_rpc(self.nodes[1:3]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) assert_equal(self.nodes[0].getbestblockhash(), hashH) assert_equal(self.nodes[1].getbestblockhash(), hashH) assert_equal(self.nodes[2].getbestblockhash(), hashL) self.log.info("Make Node1 prefer block L") self.nodes[1].preciousblock(hashL) assert_equal(self.nodes[1].getbestblockhash(), hashL) self.log.info("Make Node2 prefer block H") self.nodes[2].preciousblock(hashH) assert_equal(self.nodes[2].getbestblockhash(), hashH) if __name__ == '__main__': PreciousTest().main() diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py index a7a798da0a..1edb0c5e36 100755 --- a/test/functional/wallet_abandonconflict.py +++ b/test/functional/wallet_abandonconflict.py @@ -1,224 +1,222 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the abandontransaction RPC. The abandontransaction RPC marks a transaction and all its in-wallet descendants as abandoned which allows their inputs to be respent. It can be used to replace "stuck" or evicted transactions. It only works on transactions which are not included in a block and are not currently in the mempool. It has no effect on transactions which are already abandoned. 
""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, disconnect_nodes, satoshi_round, - sync_blocks, - sync_mempools, ) class AbandonConflictTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [["-minrelaytxfee=0.00001"], []] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): def total_fees(*txids): total = 0 for txid in txids: # '-=' is because gettransaction(txid)['fee'] returns a negative total -= self.nodes[0].gettransaction(txid)['fee'] return satoshi_round(total) self.nodes[1].generate(100) - sync_blocks(self.nodes) + self.sync_blocks() balance = self.nodes[0].getbalance() txA = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) txB = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) txC = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) - sync_mempools(self.nodes) + self.sync_mempools() self.nodes[1].generate(1) # Can not abandon non-wallet transaction assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32)) # Can not abandon confirmed transaction assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA)) - sync_blocks(self.nodes) + self.sync_blocks() newbalance = self.nodes[0].getbalance() # no more than fees lost assert balance - newbalance <= total_fees(txA, txB, txC) balance = newbalance # Disconnect nodes so node0's transactions don't get into node1's # mempool disconnect_nodes(self.nodes[0], self.nodes[1]) # Identify the 10btc outputs nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txA)["details"] if tx_out["amount"] == Decimal("10")) nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txB)["details"] if tx_out["amount"] == Decimal("10")) nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txC)["details"] if tx_out["amount"] == Decimal("10")) inputs = [] # spend 10btc outputs from txA and txB inputs.append({"txid": txA, "vout": nA}) inputs.append({"txid": txB, "vout": nB}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") outputs[self.nodes[1].getnewaddress()] = Decimal("5") signed = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs)) txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) # Identify the 14.99998btc output nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction( txAB1)["details"] if tx_out["amount"] == Decimal("14.99998")) # Create a child tx spending AB1 and C inputs = [] # Amount 14.99998 BCH inputs.append({"txid": txAB1, "vout": nAB}) # Amount 10 BCH inputs.append({"txid": txC, "vout": nC}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") signed2 = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs)) txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) # Create a child tx spending ABC2 signed3_change = Decimal("24.999") inputs = [{"txid": txABC2, "vout": 0}] outputs = {self.nodes[0].getnewaddress(): signed3_change} signed3 = self.nodes[0].signrawtransactionwithwallet( self.nodes[0].createrawtransaction(inputs, outputs)) # note tx is never directly referenced, only abandoned as a child of # the above self.nodes[0].sendrawtransaction(signed3["hex"]) 
# In mempool txs from self should increase balance from change newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("30") + signed3_change) balance = newbalance # Restart the node with a higher min relay fee so the parent tx is no longer in mempool # TODO: redo with eviction self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) # Verify txs no longer in either node's mempool assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) # Transactions which are not in the mempool should only reduce wallet balance. # Transaction inputs should still be spent, but the change not yet # received. newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - signed3_change) # Unconfirmed received funds that are not in mempool also shouldn't show # up in unconfirmed balance. Note that the transactions stored in the wallet # are not necessarily in the node's mempool. unconfbalance = self.nodes[0].getunconfirmedbalance( ) + self.nodes[0].getbalance() assert_equal(unconfbalance, newbalance) # Unconfirmed transactions which are not in the mempool should also # not be in listunspent assert txABC2 not in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)] balance = newbalance # Abandon original transaction and verify inputs are available again # including that the child tx was also abandoned self.nodes[0].abandontransaction(txAB1) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance + Decimal("30")) balance = newbalance # Verify that even with a low min relay fee, the tx is not re-accepted # from wallet on startup once abandoned. self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.00001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(self.nodes[0].getbalance(), balance) # If the transaction is re-sent the wallet also unabandons it. The # change should be available, and its child transaction should remain # abandoned. # NOTE: Abandoned transactions are internal to the wallet, and tracked # separately from other indices. self.nodes[0].sendrawtransaction(signed["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998")) balance = newbalance # Send child tx again so it is no longer abandoned. self.nodes[0].sendrawtransaction(signed2["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) balance = newbalance # Reset to a higher relay fee so that we abandon a transaction self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("24.9996")) balance = newbalance # Create a double spend of AB1. Spend it again from only A's 10 output. # Mine double spend from node 1. 
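# (Aside, hedged: once the conflicting spend of txA's output is mined on
#  node 1 and node 0 syncs to it, the wallet marks txAB1 and its descendants
#  as conflicted; gettransaction then reports a negative confirmation count,
#  which is what releases the B and C outputs checked below:
#
#      assert self.nodes[0].gettransaction(txAB1)['confirmations'] < 0
#  )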
inputs = [] inputs.append({"txid": txA, "vout": nA}) outputs = {} outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999") tx = self.nodes[0].createrawtransaction(inputs, outputs) signed = self.nodes[0].signrawtransactionwithwallet(tx) self.nodes[1].sendrawtransaction(signed["hex"]) self.nodes[1].generate(1) connect_nodes(self.nodes[0], self.nodes[1]) - sync_blocks(self.nodes) + self.sync_blocks() # Verify that B and C's 10 BCH outputs are available for spending again # because AB1 is now conflicted newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance + Decimal("20")) balance = newbalance # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 # Invalidate the block with the double spend and B's 10 BCH output should no longer be available # Don't think C's should either self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) newbalance = self.nodes[0].getbalance() # assert_equal(newbalance, balance - Decimal("10")) self.log.info( "If the balance has not declined after invalidateblock, then an out-of-mempool wallet tx which is no longer") self.log.info( "conflicted has not been resumed, causing its inputs to be seen as spent. See Issue #7315") self.log.info(str(balance) + " -> " + str(newbalance) + " ?") if __name__ == '__main__': AbandonConflictTest().main() diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py index dfaca3eddf..181b361132 100755 --- a/test/functional/wallet_address_types.py +++ b/test/functional/wallet_address_types.py @@ -1,286 +1,284 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test that the wallet can send and receive using all combinations of address types. There are 4 nodes-under-test: - node0 uses legacy addresses - node1 uses legacy addresses - node2 uses legacy addresses - node3 uses legacy addresses node4 exists to generate new blocks. ## Multisig address test Test that adding a multisig address with: - an uncompressed pubkey always gives a legacy address - only compressed pubkeys gives an `-addresstype` address ## Sending to address types test A series of tests, iterating over node0-node3. In each iteration of the test, one node sends: - 10/101th of its balance to itself (using getrawchangeaddress for single key addresses) - 20/101th to the next node - 30/101th to the node after that - 40/101th to the remaining node - 1/101th remains as fee+change Iterate over each node for single key addresses, and then over each node for multisig addresses. Repeat test. As every node sends coins after receiving, this also verifies that spending coins sent to all these address types works. 
""" from decimal import Decimal import itertools from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, connect_nodes_bi, - sync_blocks, - sync_mempools, ) class AddressTypeTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 5 # whitelist all peers to speed up tx relay / mempool sync self.extra_args = [["-whitelist=127.0.0.1"]] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() # Fully mesh-connect nodes for faster mempool sync for i, j in itertools.product(range(self.num_nodes), repeat=2): if i > j: connect_nodes_bi(self.nodes[i], self.nodes[j]) self.sync_all() def get_balances(self, confirmed=True): """Return a list of confirmed or unconfirmed balances.""" if confirmed: return [self.nodes[i].getbalance() for i in range(4)] else: return [self.nodes[i].getunconfirmedbalance() for i in range(4)] def test_address(self, node, address, multisig, typ): """Run sanity checks on an address.""" self.log.info(address) info = self.nodes[node].getaddressinfo(address) assert(self.nodes[node].validateaddress(address)['isvalid']) assert_equal(info.get('solvable'), True) if not multisig and typ == 'legacy': # P2PKH assert(not info['isscript']) assert('pubkey' in info) elif typ == 'legacy': # P2SH-multisig assert(info['isscript']) assert_equal(info['script'], 'multisig') assert('pubkeys' in info) else: # Unknown type assert(False) def test_desc(self, node, address, multisig, typ, utxo): """Run sanity checks on a descriptor reported by getaddressinfo.""" info = self.nodes[node].getaddressinfo(address) assert('desc' in info) assert_equal(info['desc'], utxo['desc']) assert(self.nodes[node].validateaddress(address)['isvalid']) # Use a ridiculously roundabout way to find the key origin info through # the PSBT logic. However, this does test consistency between the PSBT reported # fingerprints/paths and the descriptor logic. 
psbt = self.nodes[node].createpsbt( [{'txid': utxo['txid'], 'vout':utxo['vout']}], [{address: 0.00010000}]) psbt = self.nodes[node].walletprocesspsbt( psbt, False, "ALL|FORKID", True) decode = self.nodes[node].decodepsbt(psbt['psbt']) key_descs = {} for deriv in decode['inputs'][0]['bip32_derivs']: assert_equal(len(deriv['master_fingerprint']), 8) assert_equal(deriv['path'][0], 'm') key_descs[deriv['pubkey']] = '[' + deriv['master_fingerprint'] + \ deriv['path'][1:] + ']' + deriv['pubkey'] if not multisig and typ == 'legacy': # P2PKH assert_equal(info['desc'], "pkh({})".format(key_descs[info['pubkey']])) elif typ == 'legacy': # P2SH-multisig assert_equal(info['desc'], "sh(multi(2,{},{}))".format( key_descs[info['pubkeys'][0]], key_descs[info['pubkeys'][1]])) else: # Unknown type assert(False) def test_change_output_type( self, node_sender, destinations, expected_type): txid = self.nodes[node_sender].sendmany( dummy="", amounts=dict.fromkeys( destinations, 0.001)) raw_tx = self.nodes[node_sender].getrawtransaction(txid) tx = self.nodes[node_sender].decoderawtransaction(raw_tx) # Make sure the transaction has change: assert_equal(len(tx["vout"]), len(destinations) + 1) # Make sure the destinations are included, and remove them: output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]] change_addresses = [ d for d in output_addresses if d not in destinations] assert_equal(len(change_addresses), 1) self.log.debug( "Check if change address " + change_addresses[0] + " is " + expected_type) self.test_address( node_sender, change_addresses[0], multisig=False, typ=expected_type) def run_test(self): # Mine 101 blocks on node4 to bring nodes out of IBD and make sure that # no coinbases are maturing for the nodes-under-test during the test self.nodes[4].generate(101) - sync_blocks(self.nodes) + self.sync_blocks() uncompressed_1 = "0496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858ee" uncompressed_2 = "047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77" compressed_1 = "0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52" compressed_2 = "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073" # addmultisigaddress with at least 1 uncompressed key should return a # legacy address. for node in range(4): self.test_address(node, self.nodes[node].addmultisigaddress( 2, [uncompressed_1, uncompressed_2])['address'], True, 'legacy') self.test_address(node, self.nodes[node].addmultisigaddress( 2, [compressed_1, uncompressed_2])['address'], True, 'legacy') self.test_address(node, self.nodes[node].addmultisigaddress( 2, [uncompressed_1, compressed_2])['address'], True, 'legacy') # addmultisigaddress with all compressed keys should return the # appropriate address type (even when the keys are not ours). 
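# (Aside, a hedged usage sketch: addmultisigaddress takes nrequired plus a
#  list of keys (hex pubkeys, or addresses whose keys the wallet knows) and
#  returns the P2SH address together with its redeem script:
#
#      res = node.addmultisigaddress(2, [compressed_1, compressed_2])
#      res['address']       # P2SH address (hash160 of the redeem script)
#      res['redeemScript']  # hex of the 2-of-2 OP_CHECKMULTISIG script
#
#  on this chain every such address is 'legacy', as asserted below.)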
self.test_address(0, self.nodes[0].addmultisigaddress( 2, [compressed_1, compressed_2])['address'], True, 'legacy') for multisig, from_node in itertools.product([False, True], range(4)): self.log.info( "Sending from node {} with{} multisig".format(from_node, "" if multisig else "out")) old_balances = self.get_balances() self.log.debug("Old balances are {}".format(old_balances)) to_send = ( old_balances[from_node] / 101).quantize( Decimal("0.00000001")) sends = {} addresses = {} self.log.debug("Prepare sends") for n, to_node in enumerate(range(from_node, from_node + 4)): to_node %= 4 if not multisig: if from_node == to_node: # When sending non-multisig to self, use # getrawchangeaddress address = self.nodes[to_node].getrawchangeaddress() else: address = self.nodes[to_node].getnewaddress() else: addr1 = self.nodes[to_node].getnewaddress() addr2 = self.nodes[to_node].getnewaddress() address = self.nodes[to_node].addmultisigaddress(2, [addr1, addr2])[ 'address'] # Do some sanity checking on the created address typ = 'legacy' self.test_address(to_node, address, multisig, typ) # Output entry sends[address] = to_send * 10 * (1 + n) addresses[to_node] = (address, typ) self.log.debug("Sending: {}".format(sends)) self.nodes[from_node].sendmany("", sends) - sync_mempools(self.nodes) + self.sync_mempools() unconf_balances = self.get_balances(False) self.log.debug( "Check unconfirmed balances: {}".format(unconf_balances)) assert_equal(unconf_balances[from_node], 0) for n, to_node in enumerate(range(from_node + 1, from_node + 4)): to_node %= 4 assert_equal(unconf_balances[to_node], to_send * 10 * (2 + n)) # node4 collects fee and block subsidy to keep accounting simple self.nodes[4].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() # Verify that the receiving wallet contains a UTXO with the # expected address, and expected descriptor for n, to_node in enumerate(range(from_node, from_node + 4)): to_node %= 4 found = False for utxo in self.nodes[to_node].listunspent(): if utxo['address'] == addresses[to_node][0]: found = True self.test_desc( to_node, addresses[to_node][0], multisig, addresses[to_node][1], utxo) break assert found new_balances = self.get_balances() self.log.debug("Check new balances: {}".format(new_balances)) # We don't know what fee was set, so we can only check bounds on # the balance of the sending node assert_greater_than(new_balances[from_node], to_send * 10) assert_greater_than(to_send * 11, new_balances[from_node]) for n, to_node in enumerate(range(from_node + 1, from_node + 4)): to_node %= 4 assert_equal( new_balances[to_node], old_balances[to_node] + to_send * 10 * (2 + n)) # Get addresses from node2 and node3: to_address_2 = self.nodes[2].getnewaddress() to_address_3_1 = self.nodes[3].getnewaddress() to_address_3_2 = self.nodes[3].getnewaddress() self.log.info("Various change output tests") self.test_change_output_type(0, [to_address_3_1], 'legacy') self.test_change_output_type(1, [to_address_2], 'legacy') self.test_change_output_type(1, [to_address_3_1], 'legacy') self.test_change_output_type( 1, [to_address_2, to_address_3_1], 'legacy') self.test_change_output_type( 1, [to_address_3_1, to_address_3_2], 'legacy') self.test_change_output_type(2, [to_address_3_1], 'legacy') self.log.info('Test getrawchangeaddress') self.test_address( 3, self.nodes[3].getrawchangeaddress(), multisig=False, typ='legacy') if __name__ == '__main__': AddressTypeTest().main() diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 0363017818..fec54b6b24 100755 --- 
a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -1,243 +1,241 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet backup features. Test case is: 4 nodes. Nodes 1, 2, and 3 send transactions between each other; the fourth node is a miner. Nodes 1, 2, and 3 each mine a block to start, then the miner creates 100 blocks so nodes 1, 2, and 3 each have 50 mature coins to spend. Then 5 iterations of 1/2/3 sending coins amongst themselves to get transactions in the wallets, and the miner mining one block. Wallets are backed up using dumpwallet/backupwallet. Then 5 more iterations of transactions and mining a block. Miner then generates 101 more blocks, so any transaction fees paid mature. Sanity check: Sum(1,2,3,4 balances) == 114*50 Nodes 1/2/3 are shut down, and their wallets erased. Then restore using the wallet.dat backup, and confirm 1/2/3/4 balances are the same as before. Shut down again, restore using importwallet, and confirm again that balances are correct. """ from decimal import Decimal import os from random import randint import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, - sync_blocks, - sync_mempools, ) class WalletBackupTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True # nodes 1, 2, 3 are spenders, let's give them a keypool=100 # whitelist all peers to speed up tx relay / mempool sync self.extra_args = [ ["-keypool=100", "-whitelist=127.0.0.1"], ["-keypool=100", "-whitelist=127.0.0.1"], ["-keypool=100", "-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1"] ] self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[1], self.nodes[3]) connect_nodes(self.nodes[2], self.nodes[3]) connect_nodes(self.nodes[2], self.nodes[0]) self.sync_all() def one_send(self, from_node, to_address): if (randint(1, 2) == 1): amount = Decimal(randint(1, 10)) / Decimal(10) self.nodes[from_node].sendtoaddress(to_address, amount) def do_one_round(self): a0 = self.nodes[0].getnewaddress() a1 = self.nodes[1].getnewaddress() a2 = self.nodes[2].getnewaddress() self.one_send(0, a1) self.one_send(0, a2) self.one_send(1, a0) self.one_send(1, a2) self.one_send(2, a0) self.one_send(2, a1) # Have the miner (node3) mine a block. # Must sync mempools before mining. - sync_mempools(self.nodes) + self.sync_mempools() self.nodes[3].generate(1) - sync_blocks(self.nodes) + self.sync_blocks()
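# A block can only confirm transactions that are already in the mining node's
# own mempool, which is why do_one_round() syncs mempools first. A minimal
# sketch of the recurring pattern (same calls as above):
#   self.sync_mempools()       # propagate the round's wallet txns to node3
#   self.nodes[3].generate(1)  # node3 mines a block confirming them
#   self.sync_blocks()         # every wallet sees the confirmations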
# Like setup_network above, the helpers below mirror the original bash test. def start_three(self): self.start_node(0) self.start_node(1) self.start_node(2) connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[1], self.nodes[3]) connect_nodes(self.nodes[2], self.nodes[3]) connect_nodes(self.nodes[2], self.nodes[0]) def stop_three(self): self.stop_node(0) self.stop_node(1) self.stop_node(2) def erase_three(self): os.remove(os.path.join( self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat')) os.remove(os.path.join( self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat')) os.remove(os.path.join( self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat')) def run_test(self): self.log.info("Generating initial blockchain") self.nodes[0].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() self.nodes[1].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() self.nodes[2].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() self.nodes[3].generate(100) - sync_blocks(self.nodes) + self.sync_blocks() assert_equal(self.nodes[0].getbalance(), 50) assert_equal(self.nodes[1].getbalance(), 50) assert_equal(self.nodes[2].getbalance(), 50) assert_equal(self.nodes[3].getbalance(), 0) self.log.info("Creating transactions") # Five rounds of sending each other transactions. for i in range(5): self.do_one_round() self.log.info("Backing up") self.nodes[0].backupwallet(os.path.join( self.nodes[0].datadir, 'wallet.bak')) self.nodes[0].dumpwallet(os.path.join( self.nodes[0].datadir, 'wallet.dump')) self.nodes[1].backupwallet(os.path.join( self.nodes[1].datadir, 'wallet.bak')) self.nodes[1].dumpwallet(os.path.join( self.nodes[1].datadir, 'wallet.dump')) self.nodes[2].backupwallet(os.path.join( self.nodes[2].datadir, 'wallet.bak')) self.nodes[2].dumpwallet(os.path.join( self.nodes[2].datadir, 'wallet.dump')) self.log.info("More transactions") for i in range(5): self.do_one_round() # Generate 101 more blocks, so any fees paid mature self.nodes[3].generate(101) self.sync_all() balance0 = self.nodes[0].getbalance() balance1 = self.nodes[1].getbalance() balance2 = self.nodes[2].getbalance() balance3 = self.nodes[3].getbalance() total = balance0 + balance1 + balance2 + balance3 # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101). # The most recent 100 coinbases are still immature, so 214 - 100 = 114 block # rewards are spendable and the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700) ## # Test restoring spender wallets from backups ## self.log.info("Restoring using wallet.dat") self.stop_three() self.erase_three() # Start node2 with no chain shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks')) shutil.rmtree(os.path.join( self.nodes[2].datadir, 'regtest', 'chainstate')) # Restore wallets from backup shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join( self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat')) shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join( self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat')) shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join( self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat')) self.log.info("Re-starting nodes") self.start_three() - sync_blocks(self.nodes) + self.sync_blocks() assert_equal(self.nodes[0].getbalance(), balance0) assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) self.log.info("Restoring using dumped wallet") self.stop_three() self.erase_three() # start node2 with no chain shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks')) shutil.rmtree(os.path.join( self.nodes[2].datadir, 'regtest', 'chainstate')) self.start_three() assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[1].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 0) self.nodes[0].importwallet(os.path.join( self.nodes[0].datadir, 'wallet.dump')) self.nodes[1].importwallet(os.path.join( self.nodes[1].datadir, 'wallet.dump')) self.nodes[2].importwallet(os.path.join( self.nodes[2].datadir, 'wallet.dump')) - sync_blocks(self.nodes) + self.sync_blocks() assert_equal(self.nodes[0].getbalance(), balance0) assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) # Backup to source wallet file must fail sourcePaths = [ os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'), os.path.join(self.nodes[0].datadir, 'regtest', '.', 'wallets', 'wallet.dat'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', ''), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets')] for sourcePath in sourcePaths: assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath) if __name__ == '__main__': WalletBackupTest().main() diff --git a/test/functional/wallet_balance.py b/test/functional/wallet_balance.py index 409166d253..1b0cd9e5bf 100755 --- a/test/functional/wallet_balance.py +++ b/test/functional/wallet_balance.py @@ -1,159 +1,159 @@ #!/usr/bin/env python3 -# Copyright (c) 2018 The Bitcoin Core developers +# Copyright (c) 2018-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet balance RPC methods.""" from test_framework.util import ( assert_equal, assert_raises_rpc_error, ) from test_framework.test_framework import BitcoinTestFramework from decimal import Decimal RANDOM_COINBASE_ADDRESS = 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ' def create_transactions(node, address, amt, fees): # Create and sign raw transactions from node to address for amt. # Creates a transaction for each fee and returns an array # of the raw transactions. 
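# Illustrative usage of create_transactions below (hypothetical values): build
# one signed-but-unbroadcast transaction per fee level from the same inputs,
# then broadcast whichever one the test needs:
#   txs = create_transactions(node, addr, Decimal('10'),
#                             [Decimal('0.01'), Decimal('0.02')])
#   node.sendrawtransaction(txs[0]['hex'])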
utxos = node.listunspent(0) # Create transactions inputs = [] ins_total = 0 for utxo in utxos: inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) ins_total += utxo['amount'] if ins_total > amt: break txs = [] for fee in fees: outputs = { address: amt, node.getrawchangeaddress(): ins_total - amt - fee} raw_tx = node.createrawtransaction(inputs, outputs, 0) raw_tx = node.signrawtransactionwithwallet(raw_tx) txs.append(raw_tx) return txs class WalletTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Check that nodes don't own any UTXOs assert_equal(len(self.nodes[0].listunspent()), 0) assert_equal(len(self.nodes[1].listunspent()), 0) self.log.info("Mining one block for each node") self.nodes[0].generate(1) self.sync_all() self.nodes[1].generate(1) self.nodes[1].generatetoaddress(100, RANDOM_COINBASE_ADDRESS) self.sync_all() assert_equal(self.nodes[0].getbalance(), 50) assert_equal(self.nodes[1].getbalance(), 50) self.log.info("Test getbalance with different arguments") assert_equal(self.nodes[0].getbalance("*"), 50) assert_equal(self.nodes[0].getbalance("*", 1), 50) assert_equal(self.nodes[0].getbalance("*", 1, True), 50) assert_equal(self.nodes[0].getbalance(minconf=1), 50) # Send 40 BTC from 0 to 1 and 60 BTC from 1 to 0. txs = create_transactions( self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')]) self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation self.nodes[1].sendrawtransaction(txs[0]['hex']) self.sync_all() txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [ Decimal('0.01'), Decimal('0.02')]) self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation self.nodes[0].sendrawtransaction(txs[0]['hex']) self.sync_all() # First argument of getbalance must be set to "*" assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "") self.log.info( "Test getbalance and getunconfirmedbalance with unconfirmed inputs") # getbalance without any arguments includes unconfirmed transactions, # but not untrusted transactions assert_equal(self.nodes[0].getbalance(), Decimal( '9.99')) # change from node 0's send assert_equal(self.nodes[1].getbalance(), Decimal( '29.99')) # change from node 1's send # Same with minconf=0 assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99')) assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('29.99')) # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago # TODO: fix getbalance tracking of coin spentness depth assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0')) assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0')) # getunconfirmedbalance # output of node 1's spend assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # Doesn't include output of node 0's send since it was spent assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) # In the original Core version of this test, Node 1 would've bumped # the fee by 0.01 here to resend, but this is BCH, so it has 0.01 BCH # left to spend on goods and services self.sync_all() self.log.info( "Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs") # output of node 1's send assert_equal(self.nodes[0].getwalletinfo()[ 
"unconfirmed_balance"], Decimal('60')) assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # Doesn't include output of node 0's send since it was spent assert_equal(self.nodes[1].getwalletinfo()[ "unconfirmed_balance"], Decimal('0')) assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) self.nodes[1].generatetoaddress(1, RANDOM_COINBASE_ADDRESS) self.sync_all() # balances are correct after the transactions are confirmed # node 1's send plus change from node 0's send assert_equal(self.nodes[0].getbalance(), Decimal('69.99')) assert_equal(self.nodes[1].getbalance(), Decimal( '29.99')) # change from node 0's send # Send total balance away from node 1 txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress( ), Decimal('29.97'), [Decimal('0.01')]) self.nodes[1].sendrawtransaction(txs[0]['hex']) self.nodes[1].generatetoaddress(2, RANDOM_COINBASE_ADDRESS) self.sync_all() # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago # TODO: fix getbalance tracking of coin spentness depth # getbalance with minconf=3 should still show the old balance assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0')) # getbalance with minconf=2 will show the new balance. assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0.01')) if __name__ == '__main__': WalletTest().main() diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index 0ba02b3798..d0da3ca7c2 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -1,534 +1,533 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the wallet.""" from decimal import Decimal import time from test_framework.messages import FromHex, CTransaction from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_array_result, assert_equal, assert_fee_amount, assert_raises_rpc_error, connect_nodes_bi, count_bytes, - sync_blocks, wait_until, ) class WalletTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True self.extra_args = [ ["-acceptnonstdtxn=1"], ] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() # Only need nodes 0-2 running at start of test self.stop_node(3) connect_nodes_bi(self.nodes[0], self.nodes[1]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) self.sync_all(self.nodes[0:3]) def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size): """Return curr_balance after asserting the fee was in range""" fee = balance_with_fee - curr_balance assert_fee_amount(fee, tx_size, fee_per_byte * 1000) return curr_balance def run_test(self): # Check that there's no UTXO on none of the nodes assert_equal(len(self.nodes[0].listunspent()), 0) assert_equal(len(self.nodes[1].listunspent()), 0) assert_equal(len(self.nodes[2].listunspent()), 0) self.log.info("Mining blocks...") self.nodes[0].generate(1) walletinfo = self.nodes[0].getwalletinfo() assert_equal(walletinfo['immature_balance'], 50) assert_equal(walletinfo['balance'], 0) self.sync_all(self.nodes[0:3]) self.nodes[1].generate(101) self.sync_all(self.nodes[0:3]) assert_equal(self.nodes[0].getbalance(), 50) assert_equal(self.nodes[1].getbalance(), 50) assert_equal(self.nodes[2].getbalance(), 0) # Check that only first and second nodes have UTXOs utxos = self.nodes[0].listunspent() assert_equal(len(utxos), 1) assert_equal(len(self.nodes[1].listunspent()), 1) assert_equal(len(self.nodes[2].listunspent()), 0) self.log.info("test gettxout") confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"] # First, outputs that are unspent both in the chain and in the # mempool should appear with or without include_mempool txout = self.nodes[0].gettxout( txid=confirmed_txid, n=confirmed_index, include_mempool=False) assert_equal(txout['value'], 50) txout = self.nodes[0].gettxout( txid=confirmed_txid, n=confirmed_index, include_mempool=True) assert_equal(txout['value'], 50) # Send 21 BCH from 0 to 2 using sendtoaddress call. 
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11) mempool_txid = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 10) self.log.info("test gettxout (second part)") # utxo spent in mempool should be visible if you exclude mempool # but invisible if you include mempool txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False) assert_equal(txout['value'], 50) txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True) assert txout is None # new utxo from mempool should be invisible if you exclude mempool # but visible if you include mempool txout = self.nodes[0].gettxout(mempool_txid, 0, False) assert txout is None txout1 = self.nodes[0].gettxout(mempool_txid, 0, True) txout2 = self.nodes[0].gettxout(mempool_txid, 1, True) # note the mempool tx will have randomly assigned indices # but 10 will go to node2 and the rest will go to node0 balance = self.nodes[0].getbalance() assert_equal(set([txout1['value'], txout2['value']]), set([10, balance])) walletinfo = self.nodes[0].getwalletinfo() assert_equal(walletinfo['immature_balance'], 0) # Have node0 mine a block, thus it will collect its own fee. self.nodes[0].generate(1) self.sync_all(self.nodes[0:3]) # Exercise locking of unspent outputs unspent_0 = self.nodes[2].listunspent()[0] unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]} assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0]) self.nodes[2].lockunspent(False, [unspent_0]) assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0]) assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20) assert_equal([unspent_0], self.nodes[2].listlockunspent()) self.nodes[2].lockunspent(True, [unspent_0]) assert_equal(len(self.nodes[2].listlockunspent()), 0) assert_raises_rpc_error(-8, "txid must be of length 64 (not 34, for '0000000000000000000000000000000000')", self.nodes[2].lockunspent, False, [{"txid": "0000000000000000000000000000000000", "vout": 0}]) assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[2].lockunspent, False, [{"txid": "ZZZ0000000000000000000000000000000000000000000000000000000000000", "vout": 0}]) assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction", self.nodes[2].lockunspent, False, [{"txid": "0000000000000000000000000000000000000000000000000000000000000000", "vout": 0}]) assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds", self.nodes[2].lockunspent, False, [{"txid": unspent_0["txid"], "vout": 999}]) # Have node1 generate 100 blocks (so node0 can recover the fee) self.nodes[1].generate(100) self.sync_all(self.nodes[0:3]) # node0 should end up with 100 btc in block rewards plus fees, but # minus the 21 plus fees sent to node2 assert_equal(self.nodes[0].getbalance(), 100 - 21) assert_equal(self.nodes[2].getbalance(), 21) # Node0 should have two unspent outputs. 
# Create a couple of transactions to send them to node2, submit them through # node1, and make sure both node0 and node2 pick them up properly: node0utxos = self.nodes[0].listunspent(1) assert_equal(len(node0utxos), 2) # create both transactions txns_to_send = [] for utxo in node0utxos: inputs = [] outputs = {} inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) outputs[self.nodes[2].getnewaddress()] = utxo["amount"] - 3 raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) txns_to_send.append( self.nodes[0].signrawtransactionwithwallet(raw_tx)) # Have node 1 (miner) send the transactions self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True) self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True) # Have node1 mine a block to confirm transactions: self.nodes[1].generate(1) self.sync_all(self.nodes[0:3]) assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 94) # Verify that a spent output cannot be locked anymore spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]} assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0]) # Send 10 BCH normal old_balance = self.nodes[2].getbalance() address = self.nodes[0].getnewaddress("test") fee_per_byte = Decimal('0.001') / 1000 self.nodes[2].settxfee(fee_per_byte * 1000) txid = self.nodes[2].sendtoaddress(address, 10, "", "", False) self.nodes[2].generate(1) self.sync_all(self.nodes[0:3]) ctx = FromHex(CTransaction(), self.nodes[2].gettransaction(txid)['hex']) node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), old_balance - Decimal('10'), fee_per_byte, ctx.billable_size()) assert_equal(self.nodes[0].getbalance(), Decimal('10')) # Send 10 BCH with subtract fee from amount txid = self.nodes[2].sendtoaddress(address, 10, "", "", True) self.nodes[2].generate(1) self.sync_all(self.nodes[0:3]) node_2_bal -= Decimal('10') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal( '20'), fee_per_byte, count_bytes(self.nodes[2].gettransaction(txid)['hex'])) # Sendmany 10 BCH txid = self.nodes[2].sendmany('', {address: 10}, 0, "", []) self.nodes[2].generate(1) self.sync_all(self.nodes[0:3]) node_0_bal += Decimal('10') ctx = FromHex(CTransaction(), self.nodes[2].gettransaction(txid)['hex']) node_2_bal = self.check_fee_amount(self.nodes[2].getbalance( ), node_2_bal - Decimal('10'), fee_per_byte, ctx.billable_size()) assert_equal(self.nodes[0].getbalance(), node_0_bal) # Sendmany 10 BCH with subtract fee from amount txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [address]) self.nodes[2].generate(1) self.sync_all(self.nodes[0:3]) node_2_bal -= Decimal('10') assert_equal(self.nodes[2].getbalance(), node_2_bal) ctx = FromHex(CTransaction(), self.nodes[2].gettransaction(txid)['hex']) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance( ), node_0_bal + Decimal('10'), fee_per_byte, ctx.billable_size()) self.start_node(3, self.extra_args[3]) connect_nodes_bi(self.nodes[0], self.nodes[3]) self.sync_all() # check if we can list zero value tx as available coins # 1. create rawtx # 2. hex-changed one output to 0.0 # 3. sign and send # 4. 
check if recipient (node0) can list the zero value tx usp = self.nodes[1].listunspent() inputs = [{"txid": usp[0]['txid'], "vout":usp[0]['vout']}] outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11} rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace( "c0833842", "00000000") # replace 11.11 with 0.0 (int32) decRawTx = self.nodes[1].decoderawtransaction(rawTx) signedRawTx = self.nodes[1].signrawtransactionwithwallet(rawTx) decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex']) zeroValueTxid = decRawTx['txid'] self.nodes[1].sendrawtransaction(signedRawTx['hex']) self.sync_all() self.nodes[1].generate(1) # mine a block self.sync_all() # zero value tx must be in listunspent output unspentTxs = self.nodes[0].listunspent() found = False for uTx in unspentTxs: if uTx['txid'] == zeroValueTxid: found = True assert_equal(uTx['amount'], Decimal('0')) assert found # do some -walletbroadcast tests self.stop_nodes() self.start_node(0, self.extra_args[0] + ["-walletbroadcast=0"]) self.start_node(1, self.extra_args[1] + ["-walletbroadcast=0"]) self.start_node(2, self.extra_args[2] + ["-walletbroadcast=0"]) connect_nodes_bi(self.nodes[0], self.nodes[1]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) self.sync_all(self.nodes[0:3]) txIdNotBroadcasted = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 2) txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) self.nodes[1].generate(1) # mine a block, tx should not be in there self.sync_all(self.nodes[0:3]) # should not be changed because tx was not broadcast assert_equal(self.nodes[2].getbalance(), node_2_bal) # now broadcast from another node, mine a block, sync, and check the # balance self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex']) self.nodes[1].generate(1) self.sync_all(self.nodes[0:3]) node_2_bal += 2 txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) assert_equal(self.nodes[2].getbalance(), node_2_bal) # create another tx txIdNotBroadcasted = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 2) # restart the nodes with -walletbroadcast=1 self.stop_nodes() self.start_node(0, self.extra_args[0]) self.start_node(1, self.extra_args[1]) self.start_node(2, self.extra_args[2]) connect_nodes_bi(self.nodes[0], self.nodes[1]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) - sync_blocks(self.nodes[0:3]) + self.sync_blocks(self.nodes[0:3]) self.nodes[0].generate(1) - sync_blocks(self.nodes[0:3]) + self.sync_blocks(self.nodes[0:3]) node_2_bal += 2 # tx should be added to balance because after restarting the nodes tx # should be broadcast assert_equal(self.nodes[2].getbalance(), node_2_bal) # send a tx with value in a string (PR#6380 +) txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-2')) txId = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), "0.0001") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-0.0001')) # check if JSON parser can handle scientific notation in strings txId = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), "1e-4") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-0.0001')) # This will raise an exception because the amount type is wrong assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress,
self.nodes[2].getnewaddress(), "1f-4") # This will raise an exception since generate does not accept a string assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2") # Import address and private key to check correct behavior of spendable unspents # 1. Send some coins to generate new UTXO address_to_import = self.nodes[2].getnewaddress() txid = self.nodes[0].sendtoaddress(address_to_import, 1) self.nodes[0].generate(1) self.sync_all(self.nodes[0:3]) # 2. Import address from node2 to node1 self.nodes[1].importaddress(address_to_import) # 3. Validate that the imported address is watch-only on node1 assert self.nodes[1].getaddressinfo(address_to_import)["iswatchonly"] # 4. Check that the unspents after import are not spendable assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": False}) # 5. Import private key of the previously imported address on node1 priv_key = self.nodes[2].dumpprivkey(address_to_import) self.nodes[1].importprivkey(priv_key) # 6. Check that the unspents are now spendable on node1 assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": True}) # Mine a block from node0 to an address from node1 cbAddr = self.nodes[1].getnewaddress() blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0] cbTxId = self.nodes[0].getblock(blkHash)['tx'][0] self.sync_all(self.nodes[0:3]) # Check that the txid and balance are found by node1 self.nodes[1].gettransaction(cbTxId) # check if wallet or blockchain maintenance changes the balance self.sync_all(self.nodes[0:3]) blocks = self.nodes[0].generate(2) self.sync_all(self.nodes[0:3]) balance_nodes = [self.nodes[i].getbalance() for i in range(3)] block_count = self.nodes[0].getblockcount() # Check modes: # - True: unicode escaped as \u....
# - False: unicode directly as UTF-8 for mode in [True, False]: self.nodes[0].rpc.ensure_ascii = mode # unicode check: Basic Multilingual Plane, Supplementary Plane # respectively for label in [u'рыба', u'𝅘𝅥𝅯']: addr = self.nodes[0].getnewaddress() self.nodes[0].setlabel(addr, label) assert_equal(self.nodes[0].getaddressinfo( addr)['label'], label) assert label in self.nodes[0].listlabels() # restore to default self.nodes[0].rpc.ensure_ascii = True # maintenance tests maintenance = [ '-rescan', '-reindex', '-zapwallettxes=1', '-zapwallettxes=2', # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463 # '-salvagewallet', ] chainlimit = 6 for m in maintenance: self.log.info("check " + m) self.stop_nodes() # set lower ancestor limit for later self.start_node( 0, self.extra_args[0] + [m, "-limitancestorcount=" + str(chainlimit)]) self.start_node( 1, self.extra_args[1] + [m, "-limitancestorcount=" + str(chainlimit)]) self.start_node( 2, self.extra_args[2] + [m, "-limitancestorcount=" + str(chainlimit)]) if m == '-reindex': # reindex will leave rpc warmup "early"; wait for it to finish wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)]) assert_equal(balance_nodes, [ self.nodes[i].getbalance() for i in range(3)]) # Exercise listsinceblock with the last two blocks coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0]) assert_equal(coinbase_tx_1["lastblock"], blocks[1]) assert_equal(len(coinbase_tx_1["transactions"]), 1) assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1]) assert_equal(len(self.nodes[0].listsinceblock( blocks[1])["transactions"]), 0) # == Check that the wallet prefers to use coins that don't exceed mempool limits == # Get all non-zero utxos together chain_addrs = [self.nodes[0].getnewaddress( ), self.nodes[0].getnewaddress()] singletxid = self.nodes[0].sendtoaddress( chain_addrs[0], self.nodes[0].getbalance(), "", "", True) self.nodes[0].generate(1) node0_balance = self.nodes[0].getbalance() # Split into two chains rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], { chain_addrs[0]: node0_balance / 2 - Decimal('0.01'), chain_addrs[1]: node0_balance / 2 - Decimal('0.01')}) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) # Make a long chain of unconfirmed payments without hitting mempool limit # Each tx we make leaves only one output of change on a chain 1 longer # Since the amount to send is always much less than the outputs, we only ever need one output # So we should be able to generate exactly chainlimit txs for each # original output sending_addr = self.nodes[1].getnewaddress() txid_list = [] for i in range(chainlimit * 2): txid_list.append(self.nodes[0].sendtoaddress( sending_addr, Decimal('0.0001'))) assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2) assert_equal(len(txid_list), chainlimit * 2) # Without walletrejectlongchains, we will still generate a txid # The tx will be stored in the wallet but not accepted to the mempool extra_txid = self.nodes[0].sendtoaddress( sending_addr, Decimal('0.0001')) assert extra_txid not in self.nodes[0].getrawmempool() assert extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()] self.nodes[0].abandontransaction(extra_txid) total_txs = len(self.nodes[0].listtransactions("*", 99999)) # Try with walletrejectlongchains # Double chain limit but require combining inputs, so we pass # SelectCoinsMinConf
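# A sketch of an equivalent formulation, assuming the wait_until helper
# imported at the top of this file: the manual mempool-reload poll after the
# restart below could be written as
#   wait_until(lambda: len(self.nodes[0].getrawmempool()) == chainlimit * 2,
#              timeout=10)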
self.stop_node(0) self.start_node(0, self.extra_args[0] + ["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)]) # wait for loadmempool timeout = 10 while (timeout > 0 and len( self.nodes[0].getrawmempool()) < chainlimit * 2): time.sleep(0.5) timeout -= 0.5 assert_equal(len(self.nodes[0].getrawmempool()), chainlimit * 2) node0_balance = self.nodes[0].getbalance() # With walletrejectlongchains we will not create the tx and store it in # our wallet. assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01')) # Verify nothing new in wallet assert_equal(total_txs, len( self.nodes[0].listtransactions("*", 99999))) # Test getaddressinfo on external address. Note that these addresses # are taken from disablewallet.py assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy") address_info = self.nodes[0].getaddressinfo( "mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ") assert_equal(address_info['address'], "bchreg:qp8rs4qyd3aazk22eyzwg7fmdfzmxm02pywavdajx4") assert_equal(address_info["scriptPubKey"], "76a9144e3854046c7bd1594ac904e4793b6a45b36dea0988ac") assert not address_info["ismine"] assert not address_info["iswatchonly"] assert not address_info["isscript"] assert not address_info["ischange"] # Test getaddressinfo 'ischange' field on change address. self.nodes[0].generate(1) destination = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(destination, 0.123) tx = self.nodes[0].decoderawtransaction( self.nodes[0].gettransaction(txid)['hex']) output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]] assert len(output_addresses) > 1 for address in output_addresses: ischange = self.nodes[0].getaddressinfo(address)['ischange'] assert_equal(ischange, address != destination) if ischange: change = address self.nodes[0].setlabel(change, 'foobar') assert_equal(self.nodes[0].getaddressinfo(change)['ischange'], False) if __name__ == '__main__': WalletTest().main() diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py index ad5d0ba549..29c6cb9f24 100755 --- a/test/functional/wallet_import_rescan.py +++ b/test/functional/wallet_import_rescan.py @@ -1,235 +1,234 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test wallet import RPCs. Test rescan behavior of importaddress, importpubkey, importprivkey, and importmulti RPCs with different types of keys and rescan options. In the first part of the test, node 0 creates an address for each type of import RPC call and node 0 sends BCH to it. Then other nodes import the addresses, and the test makes listtransactions and getbalance calls to confirm that the importing node either did or did not execute rescans picking up the send transactions. In the second part of the test, node 0 sends more BCH to each address, and the test makes more listtransactions and getbalance calls to confirm that the importing nodes pick up the new transactions regardless of whether rescans happened previously. 
""" import collections import enum import itertools from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, set_node_times, - sync_blocks, ) Call = enum.Enum("Call", "single multiaddress multiscript") Data = enum.Enum("Data", "address pub priv") Rescan = enum.Enum("Rescan", "no yes late_timestamp") class Variant(collections.namedtuple("Variant", "call data rescan prune")): """Helper for importing one key and verifying scanned transactions.""" def try_rpc(self, func, *args, **kwargs): if self.expect_disabled: assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs) else: return func(*args, **kwargs) def do_import(self, timestamp): """Call one key import RPC.""" rescan = self.rescan == Rescan.yes if self.call == Call.single: if self.data == Data.address: response = self.try_rpc( self.node.importaddress, address=self.address["address"], label=self.label, rescan=rescan) elif self.data == Data.pub: response = self.try_rpc( self.node.importpubkey, pubkey=self.address["pubkey"], label=self.label, rescan=rescan) elif self.data == Data.priv: response = self.try_rpc( self.node.importprivkey, privkey=self.key, label=self.label, rescan=rescan) assert_equal(response, None) elif self.call in (Call.multiaddress, Call.multiscript): response = self.node.importmulti([{ "scriptPubKey": { "address": self.address["address"] } if self.call == Call.multiaddress else self.address["scriptPubKey"], "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0), "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [], "keys": [self.key] if self.data == Data.priv else [], "label": self.label, "watchonly": self.data != Data.priv }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)}) assert_equal(response, [{"success": True}]) def check(self, txid=None, amount=None, confirmations=None): """Verify that listtransactions/listreceivedbyaddress return expected values.""" txs = self.node.listtransactions( label=self.label, count=10000, include_watchonly=True) assert_equal(len(txs), self.expected_txs) addresses = self.node.listreceivedbyaddress( minconf=0, include_watchonly=True, address_filter=self.address['address']) if self.expected_txs: assert_equal(len(addresses[0]["txids"]), self.expected_txs) if txid is not None: tx, = [tx for tx in txs if tx["txid"] == txid] assert_equal(tx["label"], self.label) assert_equal(tx["address"], self.address["address"]) assert_equal(tx["amount"], amount) assert_equal(tx["category"], "receive") assert_equal(tx["label"], self.label) assert_equal(tx["txid"], txid) assert_equal(tx["confirmations"], confirmations) assert_equal("trusted" not in tx, True) address, = [ad for ad in addresses if txid in ad["txids"]] assert_equal(address["address"], self.address["address"]) assert_equal(address["amount"], self.expected_balance) assert_equal(address["confirmations"], confirmations) # Verify the transaction is correctly marked watchonly depending on # whether the transaction pays to an imported public key or # imported private key. The test setup ensures that transaction # inputs will not be from watchonly keys (important because # involvesWatchonly will be true if either the transaction output # or inputs are watchonly). if self.data != Data.priv: assert_equal(address["involvesWatchonly"], True) else: assert_equal("involvesWatchonly" not in address, True) # List of Variants for each way a key or address could be imported. 
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))] # List of nodes to import keys to. Half the nodes will have pruning disabled, # half will have it enabled. Different nodes will be used for imports that are # expected to cause rescans, and imports that are not expected to cause # rescans, in order to prevent rescans during later imports picking up # transactions associated with earlier imports. This makes it easier to keep # track of expected balances and transactions. ImportNode = collections.namedtuple("ImportNode", "prune rescan") IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)] # Rescans start at the earliest block up to 2 hours before the key timestamp. TIMESTAMP_WINDOW = 2 * 60 * 60 class ImportRescanTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 + len(IMPORT_NODES) def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): extra_args = [[] for _ in range(self.num_nodes)] for i, import_node in enumerate(IMPORT_NODES, 2): if import_node.prune: extra_args[i] += ["-prune=1"] self.add_nodes(self.num_nodes, extra_args=extra_args) # Import keys with pruning disabled self.start_nodes(extra_args=[[]] * self.num_nodes) for n in self.nodes: n.importprivkey( privkey=n.get_deterministic_priv_key().key, label='coinbase') self.stop_nodes() self.start_nodes() for i in range(1, self.num_nodes): connect_nodes(self.nodes[i], self.nodes[0]) def run_test(self): # Create one transaction on node 0 with a unique amount for # each possible type of wallet import RPC. for i, variant in enumerate(IMPORT_VARIANTS): variant.label = "label {} {}".format(i, variant) variant.address = self.nodes[1].getaddressinfo( self.nodes[1].getnewaddress(variant.label)) variant.key = self.nodes[1].dumpprivkey(variant.address["address"]) variant.initial_amount = 1 - (i + 1) / 64 variant.initial_txid = self.nodes[0].sendtoaddress( variant.address["address"], variant.initial_amount) # Generate a block containing the initial transactions, then another # block further in the future (past the rescan window). self.nodes[0].generate(1) assert_equal(self.nodes[0].getrawmempool(), []) timestamp = self.nodes[0].getblockheader( self.nodes[0].getbestblockhash())["time"] set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1) self.nodes[0].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() # For each variation of wallet key import, invoke the import RPC and # check the results from getbalance and listtransactions. for variant in IMPORT_VARIANTS: variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled variant.node = self.nodes[ 2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))] variant.do_import(timestamp) if expect_rescan: variant.expected_balance = variant.initial_amount variant.expected_txs = 1 variant.check(variant.initial_txid, variant.initial_amount, 2) else: variant.expected_balance = 0 variant.expected_txs = 0 variant.check() # Create new transactions sending to each address. for i, variant in enumerate(IMPORT_VARIANTS): variant.sent_amount = 1 - (2 * i + 1) / 128 variant.sent_txid = self.nodes[0].sendtoaddress( variant.address["address"], variant.sent_amount) # Generate a block containing the new transactions. 
self.nodes[0].generate(1) assert_equal(self.nodes[0].getrawmempool(), []) - sync_blocks(self.nodes) + self.sync_blocks() # Check the latest results from getbalance and listtransactions. for variant in IMPORT_VARIANTS: if not variant.expect_disabled: variant.expected_balance += variant.sent_amount variant.expected_txs += 1 variant.check(variant.sent_txid, variant.sent_amount, 1) else: variant.check() if __name__ == "__main__": ImportRescanTest().main() diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py index 8badd145a5..e2b6c547f2 100755 --- a/test/functional/wallet_keypool_topup.py +++ b/test/functional/wallet_keypool_topup.py @@ -1,75 +1,74 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test HD Wallet keypool restore function. Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks. - Start node1, shut down and back up the wallet. - Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110. - Stop node1, clear the datadir, move wallet file back into the datadir and restart node1. - Connect node1 to node0. Verify that they sync and node1 receives its funds.""" import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, - sync_blocks, ) class KeypoolRestoreTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ['-keypool=100']] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): wallet_path = os.path.join( self.nodes[1].datadir, "regtest", "wallets", "wallet.dat") wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak") self.nodes[0].generate(101) self.log.info("Make backup of wallet") self.stop_node(1) shutil.copyfile(wallet_path, wallet_backup_path) self.start_node(1, self.extra_args[1]) connect_nodes_bi(self.nodes[0], self.nodes[1]) self.log.info("Generate keys for wallet") for _ in range(90): addr_oldpool = self.nodes[1].getnewaddress() for _ in range(20): addr_extpool = self.nodes[1].getnewaddress() self.log.info("Send funds to wallet") self.nodes[0].sendtoaddress(addr_oldpool, 10) self.nodes[0].generate(1) self.nodes[0].sendtoaddress(addr_extpool, 5) self.nodes[0].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() self.log.info("Restart node with wallet backup") self.stop_node(1) shutil.copyfile(wallet_backup_path, wallet_path) self.start_node(1, self.extra_args[1]) connect_nodes_bi(self.nodes[0], self.nodes[1]) self.sync_all() self.log.info("Verify keypool is restored and balance is correct") assert_equal(self.nodes[1].getbalance(), 15) assert_equal(self.nodes[1].listtransactions() [0]['category'], "receive") # Check that we have marked all keys up to the used keypool key as used assert_equal(self.nodes[1].getaddressinfo( self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/110'") if __name__ == '__main__': KeypoolRestoreTest().main() diff --git a/test/functional/wallet_listreceivedby.py b/test/functional/wallet_listreceivedby.py index 2e5e923000..1345b78f18 100755 --- a/test/functional/wallet_listreceivedby.py +++ b/test/functional/wallet_listreceivedby.py @@ -1,190 +1,189 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2017 The
Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listreceivedbyaddress RPC.""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_array_result, assert_equal, assert_raises_rpc_error, - sync_blocks, ) class ReceivedByTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def skip_test_if_missing_module(self): self.skip_if_no_wallet() self.skip_if_no_cli() def run_test(self): # Generate block to get out of IBD self.nodes[0].generate(1) - sync_blocks(self.nodes) + self.sync_blocks() # save the number of coinbase reward addresses so far num_cb_reward_addresses = len( self.nodes[1].listreceivedbyaddress( minconf=0, include_empty=True, include_watchonly=True)) self.log.info("listreceivedbyaddress Test") # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # Check not listed in listreceivedbyaddress because it has 0 confirmations assert_array_result(self.nodes[1].listreceivedbyaddress(), {"address": addr}, {}, True) # Bury Tx under 10 blocks so it will be returned by # listreceivedbyaddress self.nodes[1].generate(10) self.sync_all() assert_array_result(self.nodes[1].listreceivedbyaddress(), {"address": addr}, {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) # With minconf < 10 assert_array_result(self.nodes[1].listreceivedbyaddress(5), {"address": addr}, {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) # With minconf > 10, should not find Tx assert_array_result(self.nodes[1].listreceivedbyaddress(11), { "address": addr}, {}, True) # Empty Tx empty_addr = self.nodes[1].getnewaddress() assert_array_result(self.nodes[1].listreceivedbyaddress(0, True), {"address": empty_addr}, {"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []}) # Test Address filtering # Only on addr expected = {"address": addr, "label": "", "amount": Decimal( "0.1"), "confirmations": 10, "txids": [txid, ]} res = self.nodes[1].listreceivedbyaddress( minconf=0, include_empty=True, include_watchonly=True, address_filter=addr) assert_array_result(res, {"address": addr}, expected) assert_equal(len(res), 1) # Error on invalid address assert_raises_rpc_error(-4, "address_filter parameter was invalid", self.nodes[1].listreceivedbyaddress, minconf=0, include_empty=True, include_watchonly=True, address_filter="bamboozling") # Another address receives money res = self.nodes[1].listreceivedbyaddress(0, True, True) # Right now 2 entries assert_equal(len(res), 2 + num_cb_reward_addresses) other_addr = self.nodes[1].getnewaddress() txid2 = self.nodes[0].sendtoaddress(other_addr, 0.1) self.nodes[0].generate(1) self.sync_all() # Same test as above should still pass expected = {"address": addr, "label": "", "amount": Decimal( "0.1"), "confirmations": 11, "txids": [txid, ]} res = self.nodes[1].listreceivedbyaddress(0, True, True, addr) assert_array_result(res, {"address": addr}, expected) assert_equal(len(res), 1) # Same test as above but with other_addr should still pass expected = {"address": other_addr, "label": "", "amount": Decimal( "0.1"), "confirmations": 1, "txids": [txid2, ]} res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_array_result(res, {"address": other_addr}, expected) assert_equal(len(res), 1) # Should be two entries though without filter res = self.nodes[1].listreceivedbyaddress(0, True, True) # Became 3 entries assert_equal(len(res), 3 + num_cb_reward_addresses) # Not on random addr # note on node[0]! just a random addr other_addr = self.nodes[0].getnewaddress() res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr) assert_equal(len(res), 0) self.log.info("getreceivedbyaddress Test") # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # Check balance is 0 because of 0 confirmations balance = self.nodes[1].getreceivedbyaddress(addr) assert_equal(balance, Decimal("0.0")) # Check balance is 0.1 balance = self.nodes[1].getreceivedbyaddress(addr, 0) assert_equal(balance, Decimal("0.1")) # Bury Tx under 10 blocks so it will be returned by the default # getreceivedbyaddress self.nodes[1].generate(10) self.sync_all() balance = self.nodes[1].getreceivedbyaddress(addr) assert_equal(balance, Decimal("0.1")) # Trying to getreceivedby for an address the wallet doesn't own should # return an error assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr) self.log.info("listreceivedbylabel + getreceivedbylabel Test") # set pre-state label = '' addrArr = self.nodes[1].getnewaddress() assert_equal(self.nodes[1].getaddressinfo(addrArr)['label'], label) received_by_label_json = [ r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0] balance_by_label = self.nodes[1].getreceivedbylabel(label) txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # listreceivedbylabel should return received_by_label_json because of 0 # confirmations assert_array_result(self.nodes[1].listreceivedbylabel(), {"label": label}, received_by_label_json) # getreceivedbylabel should return the same balance because of 0 # confirmations balance = self.nodes[1].getreceivedbylabel(label) assert_equal(balance, balance_by_label) self.nodes[1].generate(10) self.sync_all() # listreceivedbylabel should return updated received list assert_array_result(self.nodes[1].listreceivedbylabel(), {"label": label}, {"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))}) # getreceivedbylabel should return updated receive total balance = self.nodes[1].getreceivedbylabel(label) assert_equal(balance, balance_by_label + Decimal("0.1")) # Create a new label named "mynewlabel" that has a 0 balance address = self.nodes[1].getnewaddress() self.nodes[1].setlabel(address, "mynewlabel") received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel( 0, True) if r["label"] == "mynewlabel"][0] # Test include_empty of listreceivedbylabel assert_equal(received_by_label_json["amount"], Decimal("0.0")) # Test getreceivedbylabel for 0 amount labels balance = self.nodes[1].getreceivedbylabel("mynewlabel") assert_equal(balance, Decimal("0.0")) if __name__ == '__main__': ReceivedByTest().main() diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py index d486940c20..85401f5a41 100755 --- a/test/functional/wallet_listsinceblock.py +++ b/test/functional/wallet_listsinceblock.py @@ -1,292 +1,292 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or
http://www.opensource.org/licenses/mit-license.php. """Test the listsinceblock RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error class ListSinceBlockTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.nodes[2].generate(101) self.sync_all() self.test_no_blockhash() self.test_invalid_blockhash() self.test_reorg() self.test_double_spend() self.test_double_send() def test_no_blockhash(self): txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) blockhash, = self.nodes[2].generate(1) self.sync_all() txs = self.nodes[0].listtransactions() assert_array_result(txs, {"txid": txid}, { "category": "receive", "amount": 1, "blockhash": blockhash, "confirmations": 1, }) assert_equal( self.nodes[0].listsinceblock(), {"lastblock": blockhash, "removed": [], "transactions": txs}) assert_equal( self.nodes[0].listsinceblock(""), {"lastblock": blockhash, "removed": [], "transactions": txs}) def test_invalid_blockhash(self): assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "0000000000000000000000000000000000000000000000000000000000000000") assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 11, for 'invalid-hex')", self.nodes[0].listsinceblock, "invalid-hex") assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock, "Z000000000000000000000000000000000000000000000000000000000000000") def test_reorg(self): ''' `listsinceblock` did not behave correctly when handed a block that was no longer in the main chain: ab0 / \ aa1 [tx0] bb1 | | aa2 bb2 | | aa3 bb3 | bb4 Consider a client that has only seen block `aa3` above. It asks the node to `listsinceblock aa3`. But at some point prior, the main chain had switched to the bb chain. Previously: listsinceblock would find height=4 for block aa3 and compare this to height=5 for the tip of the chain (bb4). It would then return results restricted to bb3-bb4. Now: listsinceblock finds the fork at ab0 and returns results in the range bb1-bb4. This test only checks that [tx0] is present. ''' # Split network into two self.split_network() # send to nodes[0] from nodes[2] senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) # generate on both sides lastblockhash = self.nodes[1].generate(6)[5] self.nodes[2].generate(7) self.log.info('lastblockhash={}'.format(lastblockhash)) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:]) self.join_network() # listsinceblock(lastblockhash) should now include tx, as seen from # nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) found = False for tx in lsbres['transactions']: if tx['txid'] == senttx: found = True break assert found def test_double_spend(self): ''' This tests the case where the same UTXO is spent twice on two separate blocks as part of a reorg. ab0 / \ aa1 [tx1] bb1 [tx2] | | aa2 bb2 | | aa3 bb3 | bb4 Problematic case: 1. User 1 receives BCH in tx1 from utxo1 in block aa1. 2. User 2 receives BCH in tx2 from utxo1 (same) in block bb1. 3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain. 5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now invalidated. The current solution is to detect when the block passed to listsinceblock has been reorged out of the main chain, iterate back over existing blocks up to the fork point, and include all transactions that relate to the node's wallet. ''' self.sync_all() # Split network into two self.split_network() # share utxo between nodes[1] and nodes[2] utxos = self.nodes[2].listunspent() utxo = utxos[0] privkey = self.nodes[2].dumpprivkey(utxo['address']) self.nodes[1].importprivkey(privkey) # send from nodes[1] using utxo to nodes[0] change = '{:.8f}'.format(float(utxo['amount']) - 1.0003) recipientDict = { self.nodes[0].getnewaddress(): 1, self.nodes[1].getnewaddress(): change, } utxoDicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] txid1 = self.nodes[1].sendrawtransaction( self.nodes[1].signrawtransactionwithwallet( self.nodes[1].createrawtransaction(utxoDicts, recipientDict))['hex']) # send from nodes[2] using utxo to nodes[3] recipientDict2 = { self.nodes[3].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } self.nodes[2].sendrawtransaction( self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxoDicts, recipientDict2))['hex']) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(4) self.join_network() self.sync_all() # gettransaction should work for txid1 assert self.nodes[0].gettransaction( txid1)['txid'] == txid1, "gettransaction failed to find txid1" # listsinceblock(lastblockhash) should now include txid1, as seen from # nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # but it should not include 'removed' if include_removed=false lsbres2 = self.nodes[0].listsinceblock( blockhash=lastblockhash, include_removed=False) assert 'removed' not in lsbres2 def test_double_send(self): ''' This tests the case where the same transaction is submitted twice in two separate blocks as part of a reorg. The former will vanish and the latter will appear as the true transaction (with confirmations dropping as a result). ab0 / \ aa1 [tx1] bb1 | | aa2 bb2 | | aa3 bb3 [tx1] | bb4 Asserted: 1. tx1 is listed in listsinceblock. 2. It is included in 'removed' as it was removed, even though it is now present in a different block. 3. It is listed with a confirmation count of 2 (bb3, bb4), not 3 (aa1, aa2, aa3).
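Confirmations are always counted against the currently-active chain, so after the reorg tx1's depth is measured from its new block bb3: bb3 plus bb4 gives the 2 confirmations asserted above.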
''' self.sync_all() # Split network into two self.split_network() # create and sign a transaction utxos = self.nodes[2].listunspent() utxo = utxos[0] change = '{:.8f}'.format(float(utxo['amount']) - 1.0003) recipientDict = { self.nodes[0].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } utxoDicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] signedtxres = self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxoDicts, recipientDict)) assert signedtxres['complete'] signedtx = signedtxres['hex'] # send from nodes[1]; this will end up in aa1 txid1 = self.nodes[1].sendrawtransaction(signedtx) # generate bb1-bb2 on right side self.nodes[2].generate(2) # send from nodes[2]; this will end up in bb3 txid2 = self.nodes[2].sendrawtransaction(signedtx) assert_equal(txid1, txid2) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(2) self.join_network() self.sync_all() # gettransaction should work for txid1 self.nodes[0].gettransaction(txid1) # listsinceblock(lastblockhash) should now include txid1 in transactions # as well as in removed lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['transactions']) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # find transaction and ensure confirmations is valid for tx in lsbres['transactions']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) # the same check for the removed array; confirmations should STILL be 2 for tx in lsbres['removed']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) if __name__ == '__main__': ListSinceBlockTest().main() diff --git a/test/functional/wallet_resendwallettransactions.py b/test/functional/wallet_resendwallettransactions.py index 07235f5467..4754c2d56a 100755 --- a/test/functional/wallet_resendwallettransactions.py +++ b/test/functional/wallet_resendwallettransactions.py @@ -1,87 +1,87 @@ #!/usr/bin/env python3 -# Copyright (c) 2017-2018 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test that the wallet resends transactions periodically.""" from collections import defaultdict import time from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ToHex from test_framework.mininode import P2PInterface, mininode_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, wait_until class P2PStoreTxInvs(P2PInterface): def __init__(self): super().__init__() self.tx_invs_received = defaultdict(int) def on_inv(self, message): # Store how many times invs have been received for each tx. for i in message.inv: if i.type == 1: # save txid self.tx_invs_received[i.hash] += 1 class ResendWalletTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): node = self.nodes[0] # alias node.add_p2p_connection(P2PStoreTxInvs()) self.log.info("Create a new transaction and wait until it's broadcast") txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16) # Wallet rebroadcast is first scheduled 1 sec after startup (see # nNextResend in ResendWalletTransactions()). Sleep for just over a # second to be certain that it has been called before the first # setmocktime call below. 
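# Rough timeline of this test, from the constants used below: the first resend fires ~1 second after startup, a block is mocked at now+6 minutes, and the wallet is expected to rebroadcast within ~30 minutes after that block; the extra 5 minutes of slack gives the 41-minute mocktime further down.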
time.sleep(1.1) # Can take a few seconds due to transaction trickling wait_until( lambda: node.p2p.tx_invs_received[txid] >= 1, lock=mininode_lock) # Add a second peer since txs aren't rebroadcast to the same peer (see # filterInventoryKnown) node.add_p2p_connection(P2PStoreTxInvs()) self.log.info("Create a block") # Create and submit a block without the transaction. # Transactions are only rebroadcast if there has been a block at least five minutes # after the last time we tried to broadcast. Use mocktime and give an # extra minute to be sure. block_time = int(time.time()) + 6 * 60 node.setmocktime(block_time) block = create_block(int(node.getbestblockhash(), 16), create_coinbase( node.getblockchaininfo()['blocks']), block_time) block.nVersion = 3 block.rehash() block.solve() node.submitblock(ToHex(block)) # Transaction should not be rebroadcast node.p2ps[1].sync_with_ping() assert_equal(node.p2ps[1].tx_invs_received[txid], 0) self.log.info("Transaction should be rebroadcast after 30 minutes") # Use mocktime and give an extra 5 minutes to be sure. rebroadcast_time = int(time.time()) + 41 * 60 node.setmocktime(rebroadcast_time) wait_until( lambda: node.p2ps[1].tx_invs_received[txid] >= 1, lock=mininode_lock) if __name__ == '__main__': ResendWalletTransactionsTest().main() diff --git a/test/functional/wallet_txn_clone.py b/test/functional/wallet_txn_clone.py index 9d0c86d41d..0614eb44d1 100755 --- a/test/functional/wallet_txn_clone.py +++ b/test/functional/wallet_txn_clone.py @@ -1,152 +1,151 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test that the wallet accounts properly when there are cloned transactions with malleated scriptsigs.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, disconnect_nodes, - sync_blocks, ) class TxnMallTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def add_options(self, parser): parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: super(TxnMallTest, self).setup_network() disconnect_nodes(self.nodes[1], self.nodes[2]) disconnect_nodes(self.nodes[2], self.nodes[1]) def run_test(self): output_type = "legacy" # All nodes should start with 1,250 BCH: starting_balance = 1250 for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) # bug workaround: coins generated are assigned to the first getnewaddress!
self.nodes[i].getnewaddress() self.nodes[0].settxfee(.001) node0_address1 = self.nodes[0].getnewaddress(address_type=output_type) node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 1219) node0_tx1 = self.nodes[0].gettransaction(node0_txid1) node0_address2 = self.nodes[0].getnewaddress(address_type=output_type) node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 29) node0_tx2 = self.nodes[0].gettransaction(node0_txid2) assert_equal(self.nodes[0].getbalance(), starting_balance + node0_tx1["fee"] + node0_tx2["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress() # Send tx1, and another transaction tx2 that won't be cloned txid1 = self.nodes[0].sendtoaddress(node1_address, 40) txid2 = self.nodes[0].sendtoaddress(node1_address, 20) # Construct a clone of tx1, to be malleated rawtx1 = self.nodes[0].getrawtransaction(txid1, 1) clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"], "sequence": rawtx1["vin"][0]["sequence"]}] clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"], rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]} clone_locktime = rawtx1["locktime"] clone_raw = self.nodes[0].createrawtransaction( clone_inputs, clone_outputs, clone_locktime) # createrawtransaction randomizes the order of its outputs, so swap them if necessary. # output 0 starts at version+#inputs+input+sigstub+sequence+#outputs, # i.e. 4+1+36+1+4+1 bytes, doubled below because clone_raw is a hex string. # 40 BCH serialized is 00286bee00000000 pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1) hex40 = "00286bee00000000" output_len = 16 + 2 + 2 * \ int("0x" + clone_raw[pos0 + 16: pos0 + 16 + 2], 0) if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0: pos0 + 16] != hex40 or rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0: pos0 + 16] == hex40): output0 = clone_raw[pos0: pos0 + output_len] output1 = clone_raw[pos0 + output_len: pos0 + 2 * output_len] clone_raw = clone_raw[:pos0] + output1 + \ output0 + clone_raw[pos0 + 2 * output_len:] # Use a different signature hash type to sign. This creates an equivalent but malleated clone. # Don't send the clone anywhere yet tx1_clone = self.nodes[0].signrawtransactionwithwallet( clone_raw, None, "ALL|FORKID|ANYONECANPAY") assert_equal(tx1_clone["complete"], True) # Have node0 mine a block, if requested: if (self.options.mine_block): self.nodes[0].generate(1) - sync_blocks(self.nodes[0:2]) + self.sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50 BCH for another # matured block, minus tx1 and tx2 amounts, and minus transaction fees: expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"] if self.options.mine_block: expected += 50 expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Send clone and its parent to miner self.nodes[2].sendrawtransaction(node0_tx1["hex"]) txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"]) # ... mine a block...
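# The clone spends the same input as tx1, so only one of the two can ever confirm; node2's side of the split ends up with the longer chain after the reconnect below, making the clone the surviving transaction.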
self.nodes[2].generate(1) # Reconnect the split network, and sync chain: connect_nodes(self.nodes[1], self.nodes[2]) self.nodes[2].sendrawtransaction(node0_tx2["hex"]) self.nodes[2].sendrawtransaction(tx2["hex"]) self.nodes[2].generate(1) # Mine another block to make sure we sync - sync_blocks(self.nodes) + self.sync_blocks() # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx1_clone = self.nodes[0].gettransaction(txid1_clone) tx2 = self.nodes[0].gettransaction(txid2) # Verify expected confirmations assert_equal(tx1["confirmations"], -2) assert_equal(tx1_clone["confirmations"], 2) assert_equal(tx2["confirmations"], 1) # Check node0's total balance; should be the same as before the clone, plus # 100 BCH for 2 matured blocks, less the possibly orphaned matured subsidy expected += 100 if (self.options.mine_block): expected -= 50 assert_equal(self.nodes[0].getbalance(), expected) if __name__ == '__main__': TxnMallTest().main() diff --git a/test/functional/wallet_txn_doublespend.py b/test/functional/wallet_txn_doublespend.py index ecef2042f9..c787328abe 100755 --- a/test/functional/wallet_txn_doublespend.py +++ b/test/functional/wallet_txn_doublespend.py @@ -1,151 +1,150 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test that the wallet accounts properly when there is a double-spend conflict.""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, disconnect_nodes, find_output, - sync_blocks, ) class TxnMallTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def add_options(self, parser): parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: super().setup_network() disconnect_nodes(self.nodes[1], self.nodes[2]) disconnect_nodes(self.nodes[2], self.nodes[1]) def run_test(self): # All nodes should start with 1,250 BCH: starting_balance = 1250 # All nodes should be out of IBD. # If the nodes are not all out of IBD, that can interfere with # blockchain sync later in the test when nodes are connected, due to # timing issues. for n in self.nodes: assert n.getblockchaininfo()["initialblockdownload"] is False for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) # bug workaround: coins generated are assigned to the first getnewaddress!
self.nodes[i].getnewaddress("") # Assign coins to foo and bar addresses: node0_address_foo = self.nodes[0].getnewaddress() fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress() fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(), starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress() # First: use raw transaction API to send 1240 BCH to node1_address, # but don't broadcast: doublespend_fee = Decimal('-.02') rawtx_input_0 = {} rawtx_input_0["txid"] = fund_foo_txid rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219) rawtx_input_1 = {} rawtx_input_1["txid"] = fund_bar_txid rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29) inputs = [rawtx_input_0, rawtx_input_1] change_address = self.nodes[0].getnewaddress() outputs = {} outputs[node1_address] = 1240 outputs[change_address] = 1248 - 1240 + doublespend_fee rawtx = self.nodes[0].createrawtransaction(inputs, outputs) doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx) assert_equal(doublespend["complete"], True) # Create two spends using 1 50 BCH coin each txid1 = self.nodes[0].sendtoaddress(node1_address, 40) txid2 = self.nodes[0].sendtoaddress(node1_address, 20) # Have node0 mine a block: if (self.options.mine_block): self.nodes[0].generate(1) - sync_blocks(self.nodes[0:2]) + self.sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50BTC for another # matured block, minus 40, minus 20, and minus transaction fees: expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] if self.options.mine_block: expected += 50 expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) # Node1's balance should be both transaction amounts: assert_equal(self.nodes[1].getbalance( ), starting_balance - tx1["amount"] - tx2["amount"]) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Now give doublespend and its parents to miner: self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"]) # ... mine a block... 
self.nodes[2].generate(1) # Reconnect the split network, and sync chain: connect_nodes(self.nodes[1], self.nodes[2]) self.nodes[2].generate(1) # Mine another block to make sure we sync - sync_blocks(self.nodes) + self.sync_blocks() assert_equal(self.nodes[0].gettransaction( doublespend_txid)["confirmations"], 2) # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Both transactions should be conflicted assert_equal(tx1["confirmations"], -2) assert_equal(tx2["confirmations"], -2) # Node0's total balance should be starting balance, plus 100 BCH for # two more matured blocks, minus 1240 for the double-spend, plus fees (which are # negative): expected = starting_balance + 100 - 1240 + \ fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee assert_equal(self.nodes[0].getbalance(), expected) # Node1's balance should be its initial balance (1250 for 25 block # rewards) plus the doublespend: assert_equal(self.nodes[1].getbalance(), 1250 + 1240) if __name__ == '__main__': TxnMallTest().main()