diff --git a/test/functional/abc-p2p-fullblocktest.py b/test/functional/abc-p2p-fullblocktest.py index 4426a6d27..15495e11b 100755 --- a/test/functional/abc-p2p-fullblocktest.py +++ b/test/functional/abc-p2p-fullblocktest.py @@ -1,509 +1,504 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks simple acceptance of bigger blocks via p2p. It is derived from the much more complex p2p-fullblocktest. The intention is that small tests can be derived from this one, or this one can be extended, to cover the checks done for bigger blocks (e.g. sigops limits). """ from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.comptool import TestManager, TestInstance, RejectResult from test_framework.blocktools import * import time from test_framework.key import CECKey from test_framework.script import * from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT) class PreviousSpendableOutput(): def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending # TestNode: A peer we use to send messages to bitcoind, and store responses. class TestNode(NodeConnCB): def __init__(self): self.last_sendcmpct = None self.last_cmpctblock = None self.last_getheaders = None self.last_headers = None super().__init__() def on_sendcmpct(self, conn, message): self.last_sendcmpct = message def on_cmpctblock(self, conn, message): self.last_cmpctblock = message self.last_cmpctblock.header_and_shortids.header.calc_sha256() def on_getheaders(self, conn, message): self.last_getheaders = message def on_headers(self, conn, message): self.last_headers = message for x in self.last_headers.headers: x.calc_sha256() def clear_block_data(self): with mininode_lock: self.last_sendcmpct = None self.last_cmpctblock = None class FullBlockTest(ComparisonTestFramework): # Can either run this test as 1 node with expected answers, or two and compare them. # Change the "outcome" variable from each TestInstance object to only do # the comparison. 
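# The last_* fields above are filled in asynchronously by the network thread,
# so tests poll them via wait_until() with mininode_lock held, as the hunks
# below do. A minimal sketch of that pattern (the helper name is illustrative
# and not part of this patch; wait_until and mininode_lock are assumed to come
# from test_framework.util and test_framework.mininode):

def wait_for_compact_block(peer, timeout=30):
    # wait_until() raises AssertionError on timeout, so there is no boolean
    # return value to assert on; lock=mininode_lock serializes access to the
    # peer state shared with the network thread.
    wait_until(lambda: peer.last_cmpctblock is not None,
               timeout=timeout, lock=mininode_lock)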
def __init__(self): super().__init__() self.num_nodes = 1 self.block_heights = {} self.coinbase_key = CECKey() self.coinbase_key.set_secretbytes(b"fatstacks") self.coinbase_pubkey = self.coinbase_key.get_pubkey() self.tip = None self.blocks = {} self.excessive_block_size = 16 * ONE_MEGABYTE self.extra_args = [['-norelaypriority', '-whitelist=127.0.0.1', '-limitancestorcount=9999', '-limitancestorsize=9999', '-limitdescendantcount=9999', '-limitdescendantsize=9999', '-maxmempool=999', "-excessiveblocksize=%d" % self.excessive_block_size]] def add_options(self, parser): super().add_options(parser) parser.add_option( "--runbarelyexpensive", dest="runbarelyexpensive", default=True) def run_test(self): self.test = TestManager(self, self.options.tmpdir) self.test.add_all_connections(self.nodes) # Start up network handling in another thread NetworkThread().start() # Set the excessive block size (16MB) as the initial condition self.nodes[0].setexcessiveblock(self.excessive_block_size) self.test.run() def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) # this is a little handier to use than the version in blocktools.py def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = create_transaction(spend_tx, n, b"", value, script) return tx # sign a transaction, using the key we know about # this signs input 0 in tx, which is assumed to be spending output n in # spend_tx def sign_tx(self, tx, spend_tx, n): scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend tx.vin[0].scriptSig = CScript() return sighash = SignatureHashForkId( spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue) tx.vin[0].scriptSig = CScript( [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = self.create_tx(spend_tx, n, value, script) self.sign_tx(tx, spend_tx, n) tx.rehash() return tx def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True): """ Create a block on top of self.tip, and advance self.tip to point to the new block if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output, and the rest will go to fees.
""" if self.tip == None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) coinbase.vout[0].nValue += additional_coinbase_value if (spend != None): coinbase.vout[0].nValue += spend.tx.vout[ spend.n].nValue - 1 # all but one satoshi to fees coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) spendable_output = None if (spend != None): tx = CTransaction() # no signature yet tx.vin.append( CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # We put some random data into the first transaction of the chain # to randomize ids tx.vout.append( CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE]))) if script == None: tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) else: tx.vout.append(CTxOut(1, script)) spendable_output = PreviousSpendableOutput(tx, 0) # Now sign it if necessary scriptSig = b"" scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend scriptSig = CScript([OP_TRUE]) else: # We have to actually sign it sighash = SignatureHashForkId( spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue) scriptSig = CScript( [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) tx.vin[0].scriptSig = scriptSig # Now add the transaction to the block self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() if spendable_output != None and block_size > 0: while len(block.serialize()) < block_size: tx = CTransaction() script_length = block_size - len(block.serialize()) - 79 if script_length > 510000: script_length = 500000 tx_sigops = min( extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) extra_sigops -= tx_sigops script_pad_len = script_length - tx_sigops script_output = CScript( [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx.vout.append(CTxOut(0, script_output)) tx.vin.append( CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) spendable_output = PreviousSpendableOutput(tx, 0) self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() # Make sure the math above worked out to produce the correct block size # (the math will fail if there are too many transactions in the block) assert_equal(len(block.serialize()), block_size) # Make sure all the requested sigops have been included assert_equal(extra_sigops, 0) if solve: block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def get_tests(self): self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # returns a test case that asserts that the current tip was accepted def accepted(): return TestInstance([[self.tip, True]]) # returns a test case that asserts that the current tip was rejected def rejected(reject=None): if reject is None: return TestInstance([[self.tip, False]]) else: 
return TestInstance([[self.tip, reject]]) # move the tip back to a previous block def tip(number): self.tip = self.blocks[number] # adds transactions to the block and updates state def update_block(block_number, new_transactions): block = self.blocks[block_number] self.add_transactions_to_block(block, new_transactions) old_sha256 = block.sha256 block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[ block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # shorthand for functions block = self.next_block # Create a new block block(0) save_spendable_output() yield accepted() # Now we need that block to mature so we can spend the coinbase. test = TestInstance(sync_every_block=False) for i in range(99): block(5000 + i) test.blocks_and_transactions.append([self.tip, True]) save_spendable_output() yield test # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Let's build some blocks and test them. for i in range(16): n = i + 1 block(n, spend=out[i], block_size=n * ONE_MEGABYTE) yield accepted() # Block of maximal size block(17, spend=out[16], block_size=self.excessive_block_size) yield accepted() # Reject oversized blocks with bad-blk-length error block(18, spend=out[17], block_size=self.excessive_block_size + 1) yield rejected(RejectResult(16, b'bad-blk-length')) # Rewind bad block. tip(17) # Accept many sigops lots_of_checksigs = CScript( [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) block( 19, spend=out[17], script=lots_of_checksigs, block_size=ONE_MEGABYTE) yield accepted() too_many_blk_checksigs = CScript( [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB) block( 20, spend=out[18], script=too_many_blk_checksigs, block_size=ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(19) # Accept 40k sigops per block > 1MB and <= 2MB block(21, spend=out[18], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=ONE_MEGABYTE + 1) yield accepted() # Accept 40k sigops per block > 1MB and <= 2MB block(22, spend=out[19], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE) yield accepted() # Reject more than 40k sigops per block > 1MB and <= 2MB. block(23, spend=out[20], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(22) # Reject more than 40k sigops per block > 1MB and <= 2MB. block(24, spend=out[20], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(22) # Accept 60k sigops per block > 2MB and <= 3MB block(25, spend=out[20], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE + 1) yield accepted() # Accept 60k sigops per block > 2MB and <= 3MB block(26, spend=out[21], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=3 * ONE_MEGABYTE) yield accepted() # Reject more than 60k sigops per block > 2MB and <= 3MB.
block(27, spend=out[22], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(26) # Reject more than 60k sigops per block > 2MB and <= 3MB. block(28, spend=out[22], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=3 * ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(26) # Too many sigops in one txn too_many_tx_checksigs = CScript( [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1)) block( 29, spend=out[22], script=too_many_tx_checksigs, block_size=ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-txn-sigops')) # Rewind bad block tip(26) # P2SH # Build the redeem script, hash it, use hash to create the p2sh script redeem_script = CScript([self.coinbase_pubkey] + [ OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a p2sh transaction p2sh_tx = self.create_and_sign_transaction( out[22].tx, out[22].n, 1, p2sh_script) # Add the transaction to the block block(30) update_block(30, [p2sh_tx]) yield accepted() # Creates a new transaction using the p2sh transaction included in the # last block def spend_p2sh_tx(output_script=CScript([OP_TRUE])): # Create the transaction spent_p2sh_tx = CTransaction() spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b'')) spent_p2sh_tx.vout.append(CTxOut(1, output_script)) # Sign the transaction using the redeem script sighash = SignatureHashForkId( redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL | SIGHASH_FORKID, p2sh_tx.vout[0].nValue) sig = self.coinbase_key.sign(sighash) + bytes( bytearray([SIGHASH_ALL | SIGHASH_FORKID])) spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script]) spent_p2sh_tx.rehash() return spent_p2sh_tx # Sigops p2sh limit p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \ redeem_script.GetSigOpCount(True) # Too many sigops in one p2sh txn too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1)) block(31, spend=out[23], block_size=ONE_MEGABYTE + 1) update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)]) yield rejected(RejectResult(16, b'bad-txn-sigops')) # Rewind bad block tip(30) # Max sigops in one p2sh txn max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit)) block(32, spend=out[23], block_size=ONE_MEGABYTE + 1) update_block(32, [spend_p2sh_tx(max_p2sh_sigops)]) yield accepted() # Check that compact blocks also work for big blocks node = self.nodes[0] peer = TestNode() peer.add_connection(NodeConn('127.0.0.1', p2p_port(0), node, peer)) # Start up network handling in another thread and wait for the connection # to be established NetworkThread().start() peer.wait_for_verack() # Wait for SENDCMPCT def received_sendcmpct(): return (peer.last_sendcmpct != None) - got_sendcmpt = wait_until(received_sendcmpct, timeout=30) - assert(got_sendcmpt) + wait_until(received_sendcmpct, timeout=30) sendcmpct = msg_sendcmpct() sendcmpct.version = 1 sendcmpct.announce = True peer.send_and_ping(sendcmpct) # Exchange headers def received_getheaders(): return (peer.last_getheaders != None) - got_getheaders = wait_until(received_getheaders, timeout=30) - assert(got_getheaders) + wait_until(received_getheaders, timeout=30) # Return the favor peer.send_message(peer.last_getheaders) # Wait for the header list def received_headers(): return (peer.last_headers != None) - got_headers = wait_until(received_headers, timeout=30) -
assert(got_headers) + wait_until(received_headers, timeout=30) # Now we both know about the same headers! peer.send_message(peer.last_headers) # Send a block b33 = block(33, spend=out[24], block_size=ONE_MEGABYTE + 1) yield accepted() # Check that the node forwards it via compact block def received_block(): return (peer.last_cmpctblock != None) - got_cmpctblock = wait_until(received_block, timeout=30) - assert(got_cmpctblock) + wait_until(received_block, timeout=30) # Was it our block? cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b33.sha256) # Send a bigger block peer.clear_block_data() b34 = block(34, spend=out[25], block_size=8 * ONE_MEGABYTE) yield accepted() # Check that the node forwards it via compact block - got_cmpctblock = wait_until(received_block, timeout=30) - assert(got_cmpctblock) + wait_until(received_block, timeout=30) # Was it our block? cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b34.sha256) # Let's send a compact block and see if the node accepts it. # First, we generate the block and send all transactions to the mempool b35 = block(35, spend=out[26], block_size=8 * ONE_MEGABYTE) for i in range(1, len(b35.vtx)): node.sendrawtransaction(ToHex(b35.vtx[i]), True) # Now we create the compact block and send it comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(b35) peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) # Check that the compact block was received properly assert(int(node.getbestblockhash(), 16) == b35.sha256) if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/bip65-cltv-p2p.py b/test/functional/bip65-cltv-p2p.py index f274cd097..21d3b053b 100755 --- a/test/functional/bip65-cltv-p2p.py +++ b/test/functional/bip65-cltv-p2p.py @@ -1,179 +1,181 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test BIP65 (CHECKLOCKTIMEVERIFY). Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height 1351. """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import * from test_framework.blocktools import create_coinbase, create_block from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum from io import BytesIO CLTV_HEIGHT = 1351 # Reject codes that we might receive in this test REJECT_INVALID = 16 REJECT_OBSOLETE = 17 REJECT_NONSTANDARD = 64 def cltv_invalidate(tx): '''Modify the signature in vin 0 of the tx to fail CLTV Prepends -1 CLTV DROP in the scriptSig itself. TODO: test more ways that transactions using CLTV could be invalid (e.g. locktime requirements fail, sequence time requirements fail, etc.).
''' tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig))) def cltv_validate(node, tx, height): '''Modify the signature in vin 0 of the tx to pass CLTV Prepends CLTV DROP in the scriptSig, and sets the locktime to height''' tx.vin[0].nSequence = 0 tx.nLockTime = height # Need to re-sign, since nSequence and nLockTime changed signed_result = node.signrawtransaction( ToHex(tx), None, None, "ALL|FORKID") new_tx = CTransaction() new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex']))) new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] + list(CScript(new_tx.vin[0].scriptSig))) return new_tx def create_transaction(node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") tx = CTransaction() tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) return tx class BIP65Test(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.extra_args = [ ['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] self.setup_clean_chain = True def run_test(self): node0 = NodeConnCB() connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) node0.add_connection(connections[0]) NetworkThread().start() # Start up network handling in another thread # wait_for_verack ensures that the P2P connection is fully up. node0.wait_for_verack() self.log.info("Mining %d blocks", CLTV_HEIGHT - 2) self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) self.nodeaddress = self.nodes[0].getnewaddress() self.log.info( "Test that an invalid-according-to-CLTV transaction can still appear in a block") spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], self.nodeaddress, 1.0) cltv_invalidate(spendtx) spendtx.rehash() tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block(int(tip, 16), create_coinbase( CLTV_HEIGHT - 1), block_time) block.nVersion = 3 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() node0.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 4") tip = block.sha256 block_time += 1 block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) block.nVersion = 3 block.solve() node0.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - assert wait_until(lambda: "reject" in node0.last_message.keys()) + wait_until(lambda: "reject" in node0.last_message.keys(), + lock=mininode_lock) with mininode_lock: assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE) assert_equal( node0.last_message["reject"].reason, b'bad-version(0x00000003)') assert_equal(node0.last_message["reject"].data, block.sha256) del node0.last_message["reject"] self.log.info( "Test that invalid-according-to-cltv transactions cannot appear in a block") block.nVersion = 4 spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) cltv_invalidate(spendtx) spendtx.rehash() # First we show that this tx is valid except for CLTV by getting it # accepted to the mempool (which we can achieve with # -promiscuousmempoolflags). 
node0.send_and_ping(msg_tx(spendtx)) assert spendtx.hash in self.nodes[0].getrawmempool() # Now we verify that a block with this transaction is invalid. block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() node0.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - assert wait_until(lambda: "reject" in node0.last_message.keys()) + wait_until(lambda: "reject" in node0.last_message.keys(), + lock=mininode_lock) with mininode_lock: assert node0.last_message["reject"].code in [ REJECT_INVALID, REJECT_NONSTANDARD] assert_equal(node0.last_message["reject"].data, block.sha256) if node0.last_message["reject"].code == REJECT_INVALID: # Generic rejection when a block is invalid assert_equal( node0.last_message["reject"].reason, b'blk-bad-inputs') else: assert b'Negative locktime' in node0.last_message["reject"].reason self.log.info( "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1) spendtx.rehash() block.vtx.pop(1) block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() node0.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) if __name__ == '__main__': BIP65Test().main() diff --git a/test/functional/bipdersig-p2p.py b/test/functional/bipdersig-p2p.py index 4ea1068a4..9a30f1679 100755 --- a/test/functional/bipdersig-p2p.py +++ b/test/functional/bipdersig-p2p.py @@ -1,145 +1,147 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test BIP66 (DER SIG). Test that the DERSIG soft-fork activates at (regtest) height 1251. """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import * from test_framework.blocktools import create_coinbase, create_block from test_framework.script import CScript from io import BytesIO DERSIG_HEIGHT = 1251 # Reject codes that we might receive in this test REJECT_INVALID = 16 REJECT_OBSOLETE = 17 REJECT_NONSTANDARD = 64 # A canonical signature consists of: # <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype> def unDERify(tx): """ Make the signature in vin 0 of a tx non-DER-compliant, by adding padding after the S-value. """ scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): newscript.append(i[0:-1] + b'\0' + i[-1:]) else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) def create_transaction(node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") tx = CTransaction() tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) return tx class BIP66Test(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.extra_args = [ ['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] self.setup_clean_chain = True def run_test(self): node0 = NodeConnCB() connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) node0.add_connection(connections[0]) NetworkThread().start() # Start up network handling in another thread # wait_for_verack ensures that the P2P connection is fully up.
node0.wait_for_verack() self.log.info("Mining %d blocks", DERSIG_HEIGHT - 1) self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 1) self.nodeaddress = self.nodes[0].getnewaddress() self.log.info("Test that blocks must now be at least version 3") tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block( int(tip, 16), create_coinbase(DERSIG_HEIGHT), block_time) block.nVersion = 2 block.rehash() block.solve() node0.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) - assert wait_until(lambda: "reject" in node0.last_message.keys()) + wait_until(lambda: "reject" in node0.last_message.keys(), + lock=mininode_lock) with mininode_lock: assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE) assert_equal( node0.last_message["reject"].reason, b'bad-version(0x00000002)') assert_equal(node0.last_message["reject"].data, block.sha256) del node0.last_message["reject"] self.log.info( "Test that transactions with non-DER signatures cannot appear in a block") block.nVersion = 3 spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) unDERify(spendtx) spendtx.rehash() # Now we verify that a block with this transaction is invalid. block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node0.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) - assert wait_until(lambda: "reject" in node0.last_message.keys()) + wait_until(lambda: "reject" in node0.last_message.keys(), + lock=mininode_lock) with mininode_lock: # We can receive different reject messages depending on whether # bitcoind is running with multiple script check threads. If script # check threads are not in use, then transaction script validation # happens sequentially, and bitcoind produces more specific reject # reasons. assert node0.last_message["reject"].code in [ REJECT_INVALID, REJECT_NONSTANDARD] assert_equal(node0.last_message["reject"].data, block.sha256) if node0.last_message["reject"].code == REJECT_INVALID: # Generic rejection when a block is invalid assert_equal( node0.last_message["reject"].reason, b'blk-bad-inputs') else: assert b'Non-canonical DER signature' in node0.last_message["reject"].reason self.log.info( "Test that a version 3 block with a DERSIG-compliant transaction is accepted") block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node0.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) if __name__ == '__main__': BIP66Test().main() diff --git a/test/functional/disconnect_ban.py b/test/functional/disconnect_ban.py index 284697c59..590f30b74 100755 --- a/test/functional/disconnect_ban.py +++ b/test/functional/disconnect_ban.py @@ -1,125 +1,127 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
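# The hunks below switch this test onto the util version of wait_until(),
# which raises on timeout instead of returning a bool and can evaluate its
# predicate under a lock. A minimal sketch of those assumed semantics (a
# hypothetical stand-in, not the actual framework implementation):

import time

def wait_until_sketch(predicate, *, timeout=60, lock=None, interval=0.05):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if lock is not None:
            with lock:
                if predicate():
                    return
        elif predicate():
            return
        time.sleep(interval)  # back off between polls
    raise AssertionError("wait_until() timed out")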
"""Test node disconnect and ban behavior""" -from test_framework.mininode import wait_until from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import (assert_equal, - assert_raises_jsonrpc, - connect_nodes_bi) +from test_framework.util import ( + assert_equal, + assert_raises_jsonrpc, + connect_nodes_bi, + wait_until, +) class DisconnectBanTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False def run_test(self): self.log.info("Test setban and listbanned RPCs") self.log.info("setban: successfully ban single IP address") # node1 should have 2 connections to node0 at this point assert_equal(len(self.nodes[1].getpeerinfo()), 2) self.nodes[1].setban("127.0.0.1", "add") - wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0) + wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10) # all nodes must be disconnected at this point assert_equal(len(self.nodes[1].getpeerinfo()), 0) assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("clearbanned: successfully clear ban list") self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].setban("127.0.0.0/24", "add") self.log.info("setban: fail to ban an already banned subnet") assert_equal(len(self.nodes[1].listbanned()), 1) assert_raises_jsonrpc( -23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add") self.log.info("setban: fail to ban an invalid subnet") assert_raises_jsonrpc( -30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add") # still only one banned ip because 127.0.0.1 is within the range of # 127.0.0.0/24 assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: fail to unban a non-banned subnet") assert_raises_jsonrpc( -30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove") assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: successfully unban subnet") self.nodes[1].setban("127.0.0.0/24", "remove") assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.log.info("setban: test persistence across node restart") self.nodes[1].setban("127.0.0.0/32", "add") self.nodes[1].setban("127.0.0.0/24", "add") # ban for 1 seconds self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1000 seconds self.nodes[1].setban( "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) listBeforeShutdown = self.nodes[1].listbanned() assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) wait_until(lambda: len(self.nodes[1].listbanned()) == 3) self.stop_node(1) self.nodes[1] = self.start_node(1, self.options.tmpdir) listAfterShutdown = self.nodes[1].listbanned() assert_equal("127.0.0.0/24", listAfterShutdown[0]['address']) assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) assert_equal("/19" in listAfterShutdown[2]['address'], True) # Clear ban lists self.nodes[1].clearbanned() connect_nodes_bi(self.nodes, 0, 1) self.log.info("Test disconnectnode RPCs") self.log.info( "disconnectnode: fail to disconnect when calling with address and nodeid") address1 = self.nodes[0].getpeerinfo()[0]['addr'] node1 = self.nodes[0].getpeerinfo()[0]['addr'] assert_raises_jsonrpc( -32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1) self.log.info( "disconnectnode: fail to disconnect when calling with junk address") assert_raises_jsonrpc(-29, "Node not found in 
connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street") self.log.info( "disconnectnode: successfully disconnect node by address") address1 = self.nodes[0].getpeerinfo()[0]['addr'] self.nodes[0].disconnectnode(address=address1) - wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1) - assert not [node for node in self.nodes[0] - .getpeerinfo() if node['addr'] == address1] + wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) + assert not [node for node in self.nodes[0].getpeerinfo() + if node['addr'] == address1] self.log.info("disconnectnode: successfully reconnect node") # reconnect the node connect_nodes_bi(self.nodes, 0, 1) assert_equal(len(self.nodes[0].getpeerinfo()), 2) assert [node for node in self.nodes[0] .getpeerinfo() if node['addr'] == address1] self.log.info( "disconnectnode: successfully disconnect node by node id") id1 = self.nodes[0].getpeerinfo()[0]['id'] self.nodes[0].disconnectnode(nodeid=id1) - wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1) - assert not [node for node in self.nodes[ - 0].getpeerinfo() if node['id'] == id1] + wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) + assert not [node for node in self.nodes[0].getpeerinfo() + if node['id'] == id1] if __name__ == '__main__': DisconnectBanTest().main() diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 6313eb766..e48499b20 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -1,231 +1,231 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """An example functional test The module-level docstring should include a high-level description of what the test is doing. It's the first thing people see when they open the file and should give the reader information about *what* the test is testing and *how* it's being tested """ # Imports should be in PEP8 ordering (std library first, then third party # libraries then local imports). from collections import defaultdict # Avoid wildcard * imports if possible from test_framework.blocktools import (create_block, create_coinbase) from test_framework.mininode import ( CInv, NetworkThread, NodeConn, NodeConnCB, mininode_lock, msg_block, msg_getdata, - wait_until, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, p2p_port, + wait_until, ) # NodeConnCB is a class containing callbacks to be executed when a P2P # message is received from the node-under-test. Subclass NodeConnCB and # override the on_*() methods if you need custom behaviour. class BaseNode(NodeConnCB): def __init__(self): """Initialize the NodeConnCB Used to inialize custom properties for the Node that aren't included by default in the base class. Be aware that the NodeConnCB base class already stores a counter for each P2P message type and the last received message of each type, which should be sufficient for the needs of most tests. 
Call super().__init__() first for standard initialization and then initialize custom properties.""" super().__init__() # Stores a dictionary of all blocks received self.block_receive_map = defaultdict(int) def on_block(self, conn, message): """Override the standard on_block callback Store the hash of a received block in the dictionary.""" message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 def custom_function(): """Do some custom behaviour If this function is more generally useful for other tests, consider moving it to a module in test_framework.""" # self.log.info("running custom_function") # Oops! Can't run self.log outside the BitcoinTestFramework pass class ExampleTest(BitcoinTestFramework): # Each functional test is a subclass of the BitcoinTestFramework class. # Override the __init__(), add_options(), setup_chain(), setup_network() # and setup_nodes() methods to customize the test setup as required. def __init__(self): """Initialize the test Call super().__init__() first, and then override any test parameters for your individual test.""" super().__init__() self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes self.extra_args = [[], ["-logips"], []] # self.log.info("I've finished __init__") # Oops! Can't run self.log before run_test() # Use add_options() to add specific command-line options for your test. # In practice this is not used very much, since the tests are mostly written # to be run in automated environments without command-line options. # def add_options() # pass # Use setup_chain() to customize the node data directories. In practice # this is not used very much since the default behaviour is almost always # fine # def setup_chain(): # pass def setup_network(self): """Setup the test network topology Often you won't need to override this, since the standard network topology (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests. If you do override this method, remember to start the nodes, assign them to self.nodes, connect them and then sync.""" self.setup_nodes() # In this test, we're not connecting node2 to node0 or node1. Calls to # sync_all() should not include node2, since we're not expecting it to # sync. connect_nodes(self.nodes[0], 1) self.sync_all([self.nodes[0:2]]) # Use setup_nodes() to customize the node start behaviour (for example if # you don't want to start all nodes at the start of the test). # def setup_nodes(): # pass def custom_method(self): """Do some custom behaviour for this test Define it in a method here because you're going to use it repeatedly. If you think it's useful in general, consider moving it to the base BitcoinTestFramework class so other tests can use it.""" self.log.info("Running custom_method") def run_test(self): """Main test logic""" # Create a P2P connection to one of the nodes node0 = BaseNode() connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) node0.add_connection(connections[0]) # Start up network handling in another thread. This needs to be called # after the P2P connections have been created. NetworkThread().start() # wait_for_verack ensures that the P2P connection is fully up. node0.wait_for_verack() # Generating a block on one of the nodes will get us out of IBD blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)] self.sync_all([self.nodes[0:2]]) # Notice above how we called an RPC by calling a method with the same # name on the node object.
Notice also how we used a keyword argument # to specify a named RPC argument. Neither of those is defined on the # node object. Instead there's some __getattr__() magic going on under # the covers to dispatch unrecognised attribute calls to the RPC # interface. # Logs are nice. Do plenty of them. They can be used in place of comments for # breaking the test into sub-sections. self.log.info("Starting test!") self.log.info("Calling a custom function") custom_function() self.log.info("Calling a custom method") self.custom_method() self.log.info("Create some blocks") self.tip = int(self.nodes[0].getbestblockhash(), 16) self.block_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] + 1 height = 1 for i in range(10): # Use the mininode and blocktools functionality to manually build a block # Calling the generate() rpc is easier, but this allows us to exactly # control the blocks and transactions. block = create_block( self.tip, create_coinbase(height), self.block_time) block.solve() block_message = msg_block(block) # send_message is used to send a P2P message to the node over our NodeConn connection node0.send_message(block_message) self.tip = block.sha256 blocks.append(self.tip) self.block_time += 1 height += 1 self.log.info( "Wait for node1 to reach current tip (height 11) using RPC") self.nodes[1].waitforblockheight(11) self.log.info("Connect node2 and node1") connect_nodes(self.nodes[1], 2) self.log.info("Add P2P connection to node2") node2 = BaseNode() connections.append( NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2)) node2.add_connection(connections[1]) node2.wait_for_verack() self.log.info( "Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us") for block in blocks: getdata_request = msg_getdata() getdata_request.inv.append(CInv(2, block)) node2.send_message(getdata_request) # wait_until() will loop until a predicate condition is met. Use it to test properties of the # NodeConnCB objects. - assert wait_until(lambda: sorted(blocks) == sorted( - list(node2.block_receive_map.keys())), timeout=5) + wait_until(lambda: sorted(blocks) == sorted( + list(node2.block_receive_map.keys())), timeout=5, lock=mininode_lock) self.log.info("Check that each block was received only once") # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate. with mininode_lock: for block in node2.block_receive_map.values(): assert_equal(block, 1) if __name__ == '__main__': ExampleTest().main() diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index fbd20c85e..e04cdf4d3 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -1,98 +1,97 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool persistence. By default, bitcoind will dump mempool on shutdown and then reload it on startup. This can be overridden with the -persistmempool=0 command line option. Test is as follows: - start node0, node1 and node2. node1 has -persistmempool=0 - create 5 transactions on node2 to its own address.
Note that these are not sent to node0 or node1 addresses because we don't want them to be saved in the wallet. - check that node0 and node1 have 5 transactions in their mempools - shutdown all nodes. - startup node0. Verify that it still has 5 transactions in its mempool. Shutdown node0. This tests that by default the mempool is persistent. - startup node1. Verify that its mempool is empty. Shutdown node1. This tests that with -persistmempool=0, the mempool is not dumped to disk when the node is shut down. - Restart node0 with -persistmempool=0. Verify that its mempool is empty. Shutdown node0. This tests that with -persistmempool=0, the mempool is not loaded from disk on start up. - Restart node0 with -persistmempool. Verify that it has 5 transactions in its mempool. This tests that -persistmempool=0 does not overwrite a previously valid mempool stored on disk. """ import time -from test_framework.mininode import wait_until from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class MempoolPersistTest(BitcoinTestFramework): def __init__(self): super().__init__() # We need 3 nodes for this test. Node1 does not have a persistent mempool. self.num_nodes = 3 self.setup_clean_chain = False self.extra_args = [[], ["-persistmempool=0"], []] def run_test(self): chain_height = self.nodes[0].getblockcount() assert_equal(chain_height, 200) self.log.debug("Mine a single block to get out of IBD") self.nodes[0].generate(1) self.sync_all() self.log.debug("Send 5 transactions from node2 (to its own address)") for i in range(5): self.nodes[2].sendtoaddress( self.nodes[2].getnewaddress(), Decimal("10")) self.sync_all() self.log.debug( "Verify that node0 and node1 have 5 transactions in their mempools") assert_equal(len(self.nodes[0].getrawmempool()), 5) assert_equal(len(self.nodes[1].getrawmempool()), 5) self.log.debug( "Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.") self.stop_nodes() self.nodes = [] self.nodes.append(self.start_node(0, self.options.tmpdir)) self.nodes.append(self.start_node(1, self.options.tmpdir)) # Give bitcoind a second to reload the mempool time.sleep(1) - assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5) + wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5) assert_equal(len(self.nodes[1].getrawmempool()), 0) self.log.debug( "Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.") self.stop_nodes() self.nodes = [] self.nodes.append(self.start_node( 0, self.options.tmpdir, ["-persistmempool=0"])) # Give bitcoind a second to reload the mempool time.sleep(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.log.debug( "Stop-start node0. 
Verify that it has the transactions in its mempool.") self.stop_nodes() self.nodes = [] self.nodes.append(self.start_node(0, self.options.tmpdir)) - assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5) + wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5) if __name__ == '__main__': MempoolPersistTest().main() diff --git a/test/functional/p2p-compactblocks.py b/test/functional/p2p-compactblocks.py index 2117c4f36..804d8116d 100755 --- a/test/functional/p2p-compactblocks.py +++ b/test/functional/p2p-compactblocks.py @@ -1,939 +1,931 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment from test_framework.script import CScript, OP_TRUE ''' CompactBlocksTest -- test compact blocks (BIP 152) Only testing Version 1 compact blocks (txids) ''' # TestNode: A peer we use to send messages to bitcoind, and store responses. class TestNode(NodeConnCB): def __init__(self): super().__init__() self.last_sendcmpct = [] self.block_announced = False # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. self.announced_blockhashes = set() def on_sendcmpct(self, conn, message): self.last_sendcmpct.append(message) def on_cmpctblock(self, conn, message): self.block_announced = True self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() self.announced_blockhashes.add( self.last_message["cmpctblock"].header_and_shortids.header.sha256) def on_headers(self, conn, message): self.block_announced = True for x in self.last_message["headers"].headers: x.calc_sha256() self.announced_blockhashes.add(x.sha256) def on_inv(self, conn, message): for x in self.last_message["inv"].inv: if x.type == 2: self.block_announced = True self.announced_blockhashes.add(x.hash) # Requires caller to hold mininode_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.connection.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) - assert wait_until(self.received_block_announcement, timeout=30) + wait_until(self.received_block_announcement, + timeout=30, lock=mininode_lock) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. 
def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): return (block_hash in self.announced_blockhashes) - return wait_until(received_hash, timeout=timeout) + wait_until(received_hash, timeout=timeout, lock=mininode_lock) def send_await_disconnect(self, message, timeout=30): """Sends a message to the node and wait for disconnect. This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) - success = wait_until(lambda: not self.connected, timeout=timeout) - if not success: - logger.error("send_await_disconnect failed!") - raise AssertionError("send_await_disconnect failed!") - return success + wait_until(lambda: not self.connected, + timeout=timeout, lock=mininode_lock) class CompactBlocksTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ["-txindex"]] self.utxos = [] def build_block_on_tip(self, node): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] block = create_block( int(tip, 16), create_coinbase(height + 1), mtp + 1) block.nVersion = 4 block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0. block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) self.nodes[0].generate(100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) for i in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): return (len(test_node.last_sendcmpct) > 0) - got_message = wait_until(received_sendcmpct, timeout=30) - assert(received_sendcmpct()) - assert(got_message) + wait_until(received_sendcmpct, timeout=30, lock=mininode_lock) with mininode_lock: # Check that the first version received is the preferred one assert_equal( test_node.last_sendcmpct[0].version, preferred_version) # And that we receive versions down to 1. 
assert_equal(test_node.last_sendcmpct[-1].version, 1) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert(peer.block_announced) - assert(got_message) with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) # We shouldn't get any block announcements via cmpctblock yet. check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Try one more time, this time after requesting headers. test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() sendcmpct.version = preferred_version + 1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True sendcmpct.version = preferred_version sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after sending a version-1, announce=false message. sendcmpct.version = preferred_version - 1 sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. sendcmpct.version = preferred_version - 1 sendcmpct.announce = True old_node.send_and_ping(sendcmpct) # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, old_node, lambda p: "cmpctblock" in p.last_message) # This test actually causes bitcoind to (reasonably!) disconnect us, so do # this last.
def test_invalid_cmpctblock_message(self): self.nodes[0].generate(101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) assert_equal( int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # bitcoind's choice of nonce. def test_compactblock_construction(self, node, test_node): # Generate a bunch of transactions. node.generate(101) num_transactions = 25 address = node.getnewaddress() for i in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) assert(tx.wit.is_null()) # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) - assert(test_node.wait_for_block_announcement(tip)) + test_node.wait_for_block_announcement(tip) # Make sure we will receive a fast-announce compact block self.request_cb_announcements(test_node, node) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False)) [tx.calc_sha256() for tx in block.vtx] block.rehash() # Wait until the block was announced (via compact blocks) - wait_until(test_node.received_block_announcement, timeout=30) - assert(test_node.received_block_announcement()) + wait_until(test_node.received_block_announcement, + timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) # Now fetch the compact block using a normal non-announce getdata with mininode_lock: test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" test_node.send_message(msg_getdata([inv])) - wait_until(test_node.received_block_announcement, timeout=30) - assert(test_node.received_block_announcement()) + wait_until(test_node.received_block_announcement, + timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block): # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert(len(header_and_shortids.prefilled_txn) >= 1) assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. 
for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks the non-witness parts of the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # And this checks the witness wtxid = entry.tx.calc_sha256(True) # Shouldn't have received a witness assert(entry.tx.wit.is_null()) # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. [k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): if (len(header_and_shortids.prefilled_txn) > 0 and header_and_shortids.prefilled_txn[0].index == index): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that bitcoind requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed. def test_compactblock_requests(self, node, test_node, version): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node) with mininode_lock: test_node.last_message.pop("getdata", None) if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) - success = wait_until( - lambda: "getheaders" in test_node.last_message, timeout=30) - assert(success) + wait_until(lambda: "getheaders" in test_node.last_message, + timeout=30, lock=mininode_lock) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) - success = wait_until( - lambda: "getdata" in test_node.last_message, timeout=30) - assert(success) + wait_until(lambda: "getdata" in test_node.last_message, + timeout=30, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert_equal(test_node.last_message["getdata"].inv[0].type, 4) assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 if version == 2: coinbase_hash = block.vtx[0].calc_sha256(True) comp_block.shortids = [ calculate_shortid(k0, k1, coinbase_hash)] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: assert("getblocktxn" in test_node.last_message) absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. if version == 2: msg = msg_witness_blocktxn() else: msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. 
def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. def test_getblocktxn_requests(self, node, test_node, version): with_witness = (version == 2) def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert("getblocktxn" in peer.last_message) absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() if with_witness: msg_bt = msg_witness_blocktxn() # serialize with witnesses msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions comp_block.initialize_from_block( block, prefill_list=[0, 1, 5], use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(block.vtx[1])) assert(block.vtx[1].hash in node.getrawmempool()) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. comp_block.initialize_from_block( block, prefill_list=[0, 2, 3, 4], use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [5]) msg_bt.block_transactions = BlockTransactions( block.sha256, [block.vtx[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) for tx in block.vtx[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) # Clear out last request. 
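# The clear-then-assert pattern used next is the framework's race-free
# idiom: last_message is mutated by the network thread, so the test drops
# the key under mininode_lock first and re-checks under the lock after the
# event. A sketch of the idiom (this is the shape of the code below, not
# new test logic):
#
#   with mininode_lock:
#       test_node.last_message.pop("getblocktxn", None)
#   # ... deliver the compact block ...
#   with mininode_lock:
#       assert "getblocktxn" not in test_node.last_message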
with mininode_lock: test_node.last_message.pop("getblocktxn", None) # Send compact block comp_block.initialize_from_block( block, prefill_list=[0], use_witness=with_witness) test_tip_after_message( node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert("getblocktxn" not in test_node.last_message) # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in block.vtx[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:6]: assert(tx.hash in mempool) # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block( block, prefill_list=[0], use_witness=(version == 2)) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indexes = [] with mininode_lock: assert("getblocktxn" in test_node.last_message) absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) # Now give an incorrect response. # Note that it's possible for bitcoind to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change were made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() if version == 2: msg = msg_witness_blocktxn() msg.block_transactions = BlockTransactions( block.sha256, [block.vtx[5]] + block.vtx[7:]) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request - success = wait_until( - lambda: "getdata" in test_node.last_message, timeout=10) - assert(success) + wait_until(lambda: "getdata" in test_node.last_message, + timeout=10, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG) assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Deliver the block if version == 2: test_node.send_and_ping(msg_witness_block(block)) else: test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # bitcoind will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. 
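# In other words, with the chain tip at height H, a getblocktxn is answered
# with a "blocktxn" only while the requested block's height is >= H - 10;
# anything older falls back to a full "block" response, which the tail of
# this handler test checks. A one-line sketch of the window the loop walks
# (served_from_blocktxn is an illustrative name):
#
#   served_from_blocktxn = (height >= chain_height - MAX_GETBLOCKTXN_DEPTH)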
MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute( sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) - success = wait_until( - lambda: "blocktxn" in test_node.last_message, timeout=10) - assert(success) + wait_until(lambda: "blocktxn" in test_node.last_message, + timeout=10, lock=mininode_lock) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int( block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop( 0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) if version == 1: # Witnesses should have been stripped assert(tx.wit.is_null()) else: # Check that the witness matches assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), [0]) with mininode_lock: test_node.last_message.pop("block", None) test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(block_hash, 16)) assert "blocktxn" not in test_node.last_message def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) - wait_until(test_node.received_block_announcement, timeout=30) + wait_until(test_node.received_block_announcement, + timeout=30, lock=mininode_lock) test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - success = wait_until( - lambda: "cmpctblock" in test_node.last_message, timeout=30) - assert(success) + wait_until(lambda: "cmpctblock" in test_node.last_message, + timeout=30, lock=mininode_lock) test_node.clear_block_announcement() node.generate(1) - wait_until(test_node.received_block_announcement, timeout=30) + wait_until(test_node.received_block_announcement, + timeout=30, lock=mininode_lock) test_node.clear_block_announcement() with mininode_lock: test_node.last_message.pop("block", None) test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - success = wait_until( - lambda: "block" in test_node.last_message, timeout=30) - assert(success) + wait_until(lambda: "block" in test_node.last_message, + timeout=30, lock=mininode_lock) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. 
cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height - 5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert(found) # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: assert "blocktxn" not in test_node.last_message def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) [l.clear_block_announcement() for l in listeners] node.submitblock(ToHex(block)) for l in listeners: - wait_until(lambda: l.received_block_announcement(), timeout=30) + wait_until(lambda: l.received_block_announcement(), + timeout=30, lock=mininode_lock) with mininode_lock: for l in listeners: assert "cmpctblock" in l.last_message l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256( ) assert_equal( l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. def test_invalid_tx_in_compactblock(self, node, test_node): assert(len(self.utxos)) utxo = self.utxos[0] block = self.build_block_with_transactions(node, utxo, 5) del block.vtx[3] block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. 
comp_block = HeaderAndShortIDs() comp_block.initialize_from_block( block, prefill_list=[0, 1, 2, 3, 4], use_witness=False) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert(int(node.getbestblockhash(), 16) != block.sha256) test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version=1): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) msg = msg_sendcmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): assert(len(self.utxos)) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert "getblocktxn" in peer.last_message return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()] cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[ 0].scriptWitness.stack = [ser_uint256(0)] cmpct_block.use_witness = True delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert(int(node.getbestblockhash(), 16) != block.sha256) msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): # Setup the p2p connections and start up the network thread. self.test_node = TestNode() self.ex_softfork_node = TestNode() self.old_node = TestNode() # version 1 peer connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.ex_softfork_node, services=NODE_NETWORK)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.old_node, services=NODE_NETWORK)) self.test_node.add_connection(connections[0]) self.ex_softfork_node.add_connection(connections[1]) self.old_node.add_connection(connections[2]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here self.test_node.wait_for_verack() # We will need UTXOs to construct transactions in later tests. self.make_utxos() self.log.info("Running tests:") self.log.info("\tTesting SENDCMPCT p2p message... 
") self.test_sendcmpct(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_sendcmpct( self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblock_construction( self.nodes[1], self.ex_softfork_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock requests... ") self.test_compactblock_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_compactblock_requests( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_requests(self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_handler(self.nodes[1], self.ex_softfork_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) sync_blocks(self.nodes) self.log.info( "\tTesting compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblocks_not_at_tip( self.nodes[1], self.ex_softfork_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_incorrect_blocktxn_response( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) # End-to-end block relay tests self.log.info("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0]) self.request_cb_announcements(self.old_node, self.nodes[1]) self.request_cb_announcements( self.ex_softfork_node, self.nodes[1], version=2) self.test_end_to_end_block_relay( self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay( self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node]) self.log.info("\tTesting handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) self.test_invalid_tx_in_compactblock( self.nodes[1], self.ex_softfork_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) self.log.info( "\tTesting reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers( self.nodes[1], self.ex_softfork_node, self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() if __name__ == '__main__': CompactBlocksTest().main() diff --git a/test/functional/p2p-leaktests.py b/test/functional/p2p-leaktests.py index b4ce0b72e..7d660237f 100755 --- a/test/functional/p2p-leaktests.py +++ b/test/functional/p2p-leaktests.py @@ -1,178 +1,178 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * ''' Test for message sending before handshake completion A node should never send anything other than VERSION/VERACK/REJECT until it's received a VERACK. This test connects to a node and sends it a few messages, trying to entice it into sending us something it shouldn't. ''' banscore = 10 class CLazyNode(NodeConnCB): def __init__(self): super().__init__() self.unexpected_msg = False self.ever_connected = False def bad_message(self, message): self.unexpected_msg = True self.log.info("should not have received message: %s" % message.command) def on_open(self, conn): self.connected = True self.ever_connected = True def on_version(self, conn, message): self.bad_message(message) def on_verack(self, conn, message): self.bad_message(message) def on_reject(self, conn, message): self.bad_message(message) def on_inv(self, conn, message): self.bad_message(message) def on_addr(self, conn, message): self.bad_message(message) def on_alert(self, conn, message): self.bad_message(message) def on_getdata(self, conn, message): self.bad_message(message) def on_getblocks(self, conn, message): self.bad_message(message) def on_tx(self, conn, message): self.bad_message(message) def on_block(self, conn, message): self.bad_message(message) def on_getaddr(self, conn, message): self.bad_message(message) def on_headers(self, conn, message): self.bad_message(message) def on_getheaders(self, conn, message): self.bad_message(message) def on_ping(self, conn, message): self.bad_message(message) def on_mempool(self, conn, message): self.bad_message(message) def on_pong(self, conn, message): self.bad_message(message) def on_feefilter(self, conn, message): self.bad_message(message) def on_sendheaders(self, conn, message): self.bad_message(message) def on_sendcmpct(self, conn, message): self.bad_message(message) def on_cmpctblock(self, conn, message): self.bad_message(message) def on_getblocktxn(self, conn, message): self.bad_message(message) def on_blocktxn(self, conn, message): self.bad_message(message) # Node that never sends a version. We'll use this to send a bunch of messages # anyway, and eventually get disconnected. class CNodeNoVersionBan(CLazyNode): # send a bunch of veracks without sending a message. This should get us disconnected. # NOTE: implementation-specific check here. Remove if bitcoind ban # behavior changes def on_open(self, conn): super().on_open(conn) for i in range(banscore): self.send_message(msg_verack()) def on_reject(self, conn, message): pass # Node that never sends a version. This one just sits idle and hopes to receive # any message (it shouldn't!) class CNodeNoVersionIdle(CLazyNode): def __init__(self): super().__init__() # Node that sends a version but not a verack. class CNodeNoVerackIdle(CLazyNode): def __init__(self): self.version_received = False super().__init__() def on_reject(self, conn, message): pass def on_verack(self, conn, message): pass # When version is received, don't reply with a verack. Instead, see if the # node will give us a message that it shouldn't. This is not an exhaustive # list!
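# For contrast, a sketch of the compliant opening sequence a well-behaved
# peer performs (the mininode framework does this automatically when the
# NodeConn send_version argument is left enabled):
#
#   conn.send_message(msg_version())   # introduce ourselves first
#   # ... wait for the peer's version ...
#   conn.send_message(msg_verack())    # acknowledge; only now may other
#                                      # traffic (ping, getaddr, ...) flow
#
# Withholding the verack, as this class does, traps any node that talks
# too early.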
def on_version(self, conn, message): self.version_received = True conn.send_message(msg_ping()) conn.send_message(msg_getaddr()) class P2PLeakTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.extra_args = [['-banscore=' + str(banscore)]] def run_test(self): no_version_bannode = CNodeNoVersionBan() no_version_idlenode = CNodeNoVersionIdle() no_verack_idlenode = CNodeNoVerackIdle() connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False)) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False)) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode)) no_version_bannode.add_connection(connections[0]) no_version_idlenode.add_connection(connections[1]) no_verack_idlenode.add_connection(connections[2]) NetworkThread().start() # Start up network handling in another thread - assert wait_until( - lambda: no_version_bannode.ever_connected, timeout=10) - assert wait_until( - lambda: no_version_idlenode.ever_connected, timeout=10) - assert wait_until( - lambda: no_verack_idlenode.version_received, timeout=10) + wait_until(lambda: no_version_bannode.ever_connected, + timeout=10, lock=mininode_lock) + wait_until(lambda: no_version_idlenode.ever_connected, + timeout=10, lock=mininode_lock) + wait_until(lambda: no_verack_idlenode.version_received, + timeout=10, lock=mininode_lock) # Mine a block and make sure that it's not sent to the connected nodes self.nodes[0].generate(1) # Give the node enough time to possibly leak out a message time.sleep(5) # This node should have been banned assert not no_version_bannode.connected [conn.disconnect_node() for conn in connections] # Make sure no unexpected messages came in assert(no_version_bannode.unexpected_msg == False) assert(no_version_idlenode.unexpected_msg == False) assert(no_verack_idlenode.unexpected_msg == False) if __name__ == '__main__': P2PLeakTest().main() diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py index 166c5b865..df583f687 100755 --- a/test/functional/sendheaders.py +++ b/test/functional/sendheaders.py @@ -1,597 +1,597 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.blocktools import create_block, create_coinbase ''' SendHeadersTest -- test behavior of headers messages to announce blocks. Setup: - Two nodes, two p2p connections to node0. One p2p connection should only ever receive inv's (omitted from testing description below, this is our control). Second node is used for creating reorgs. Part 1: No headers announcements before "sendheaders" a. node mines a block [expect: inv] send getdata for the block [expect: block] b. node mines another block [expect: inv] send getheaders and getdata [expect: headers, then block] c. node mines another block [expect: inv] peer mines a block, announces with header [expect: getdata] d. node mines another block [expect: inv] Part 2: After "sendheaders", headers announcements should generally work. a. peer sends sendheaders [expect: no response] peer sends getheaders with current tip [expect: no response] b. node mines a block [expect: tip header] c. 
for N in 1, ..., 10: * for announce-type in {inv, header} - peer mines N blocks, announces with announce-type [ expect: getheaders/getdata or getdata, deliver block(s) ] - node mines a block [ expect: 1 header ] Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. - For response-type in {inv, getheaders} * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] * node mines an 8-block reorg [ expect: inv at tip ] * peer responds with getblocks/getdata [expect: inv, blocks ] * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] * node mines another block at tip [ expect: inv ] * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] * peer requests block [ expect: block ] * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] * node mines 1 block [expect: 1 header, peer responds with getdata] Part 4: Test direct fetch behavior a. Announce 2 old block headers. Expect: no getdata requests. b. Announce 3 new blocks via 1 headers message. Expect: one getdata request for all 3 blocks. (Send blocks.) c. Announce 1 header that forks off the last two blocks. Expect: no response. d. Announce 1 more header that builds on that fork. Expect: one getdata request for two blocks. e. Announce 16 more headers that build on that fork. Expect: getdata request for 14 more blocks. f. Announce 1 more header that builds on that fork. Expect: no response. Part 5: Test handling of headers that don't connect. a. Repeat 10 times: 1. Announce a header that doesn't connect. Expect: getheaders message 2. Send headers chain. Expect: getdata for the missing blocks, tip update. b. Then send 9 more headers that don't connect. Expect: getheaders message each time. c. Announce a header that does connect. Expect: no response. d. Announce 49 headers that don't connect. Expect: getheaders message each time. e. Announce one more that doesn't connect. Expect: disconnect. 
''' direct_fetch_response_time = 0.05 class TestNode(NodeConnCB): def __init__(self): super().__init__() self.block_announced = False self.last_blockhash_announced = None def clear_last_announcement(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) # Request data for a list of block hashes def get_data(self, block_hashes): msg = msg_getdata() for x in block_hashes: msg.inv.append(CInv(2, x)) self.connection.send_message(msg) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.connection.send_message(msg) def send_block_inv(self, blockhash): msg = msg_inv() msg.inv = [CInv(2, blockhash)] self.connection.send_message(msg) def on_inv(self, conn, message): self.block_announced = True self.last_blockhash_announced = message.inv[-1].hash def on_headers(self, conn, message): if len(message.headers): self.block_announced = True message.headers[-1].calc_sha256() self.last_blockhash_announced = message.headers[-1].sha256 def on_block(self, conn, message): self.last_message["block"].block.calc_sha256() # Test whether the last announcement we received had the # right header or the right inv # inv and headers should be lists of block hashes def check_last_announcement(self, headers=None, inv=None): expect_headers = headers if headers != None else [] expect_inv = inv if inv != None else [] def test_function(): return self.block_announced - assert(wait_until(test_function, timeout=60)) + wait_until(test_function, timeout=60, lock=mininode_lock) with mininode_lock: self.block_announced = False success = True compare_inv = [] if "inv" in self.last_message: compare_inv = [x.hash for x in self.last_message["inv"].inv] if compare_inv != expect_inv: success = False hash_headers = [] if "headers" in self.last_message: # treat headers as a list of block hashes hash_headers = [ x.sha256 for x in self.last_message["headers"].headers] if hash_headers != expect_headers: success = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) return success def wait_for_getdata(self, hash_list, timeout=60): if hash_list == []: return def test_function(): return "getdata" in self.last_message and [ x.hash for x in self.last_message["getdata"].inv] == hash_list - assert(wait_until(test_function, timeout=timeout)) + wait_until(test_function, timeout=timeout, lock=mininode_lock) return def wait_for_block_announcement(self, block_hash, timeout=60): def test_function(): return self.last_blockhash_announced == block_hash - assert(wait_until(test_function, timeout=timeout)) + wait_until(test_function, timeout=timeout, lock=mininode_lock) return def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def send_getblocks(self, locator): getblocks_message = msg_getblocks() getblocks_message.locator.vHave = locator self.send_message(getblocks_message) class SendHeadersTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 # mine count blocks and return the new tip def mine_blocks(self, count): # Clear out last block announcement from each p2p listener [x.clear_last_announcement() for x in self.p2p_connections] self.nodes[0].generate(count) return int(self.nodes[0].getbestblockhash(), 16) # mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
# Note: we clear the state of our p2p connections after the # to-be-reorged-out blocks are mined, so that we don't break later tests. # return the list of block hashes newly mined def mine_reorg(self, length): self.nodes[0].generate( length) # make sure all invalidated blocks are node0's sync_blocks(self.nodes, wait=0.1) for x in self.p2p_connections: x.wait_for_block_announcement( int(self.nodes[0].getbestblockhash(), 16)) x.clear_last_announcement() tip_height = self.nodes[1].getblockcount() hash_to_invalidate = self.nodes[ 1].getblockhash(tip_height - (length - 1)) self.nodes[1].invalidateblock(hash_to_invalidate) all_hashes = self.nodes[1].generate( length + 1) # Must be longer than the orig chain sync_blocks(self.nodes, wait=0.1) return [int(x, 16) for x in all_hashes] def run_test(self): # Setup the p2p connections and start up the network thread. inv_node = TestNode() test_node = TestNode() self.p2p_connections = [inv_node, test_node] connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) # Set nServices to 0 for test_node, so no block download will occur outside of # direct fetching connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) inv_node.add_connection(connections[0]) test_node.add_connection(connections[1]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here inv_node.wait_for_verack() test_node.wait_for_verack() tip = int(self.nodes[0].getbestblockhash(), 16) # PART 1 # 1. Mine a block; expect inv announcements each time self.log.info( "Part 1: headers don't start before sendheaders message...") for i in range(4): old_tip = tip tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(inv=[tip]), True) # Try a few different responses; none should affect next # announcement if i == 0: # first request the block test_node.get_data([tip]) test_node.wait_for_block(tip, timeout=5) elif i == 1: # next try requesting header and block test_node.get_headers(locator=[old_tip], hashstop=tip) test_node.get_data([tip]) test_node.wait_for_block(tip) test_node.clear_last_announcement( ) # since we requested headers... elif i == 2: # this time announce own block via headers height = self.nodes[0].getblockcount() last_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] block_time = last_time + 1 new_block = create_block( tip, create_coinbase(height + 1), block_time) new_block.solve() test_node.send_header_for_blocks([new_block]) test_node.wait_for_getdata([new_block.sha256], timeout=5) test_node.send_message(msg_block(new_block)) test_node.sync_with_ping() # make sure this block is processed inv_node.clear_last_announcement() test_node.clear_last_announcement() self.log.info("Part 1: success!") self.log.info( "Part 2: announce blocks with headers after sendheaders message...") # PART 2 # 2. Send a sendheaders message and test that headers announcements # commence and keep working. 
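# Context for this step: "sendheaders" (BIP 130) only opts the peer in to
# headers announcements; the node still falls back to inv until it believes
# our best header is near its own tip, which is why the getheaders sync
# below is needed before headers announcements actually start. A rough
# sketch of the peer-side decision (illustrative names, not the node's
# exact code):
#
#   announce_with_headers = peer_sent_sendheaders and \
#       new_tip_connects_to_headers_the_peer_already_has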
test_node.send_message(msg_sendheaders()) prev_tip = int(self.nodes[0].getbestblockhash(), 16) test_node.get_headers(locator=[prev_tip], hashstop=0) test_node.sync_with_ping() # Now that we've synced headers, headers announcements should work tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(headers=[tip]), True) height = self.nodes[0].getblockcount() + 1 block_time += 10 # Advance far enough ahead for i in range(10): # Mine i blocks, and alternate announcing either via # inv (of tip) or via headers. After each, new blocks # mined by the node should successfully be announced # with block header, even though the blocks are never requested for j in range(2): blocks = [] for b in range(i + 1): blocks.append( create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 if j == 0: # Announce via inv test_node.send_block_inv(tip) test_node.wait_for_getheaders(timeout=5) # Should have received a getheaders now test_node.send_header_for_blocks(blocks) # Test that duplicate inv's won't result in duplicate # getdata requests, or duplicate headers announcements [inv_node.send_block_inv(x.sha256) for x in blocks] test_node.wait_for_getdata( [x.sha256 for x in blocks], timeout=5) inv_node.sync_with_ping() else: # Announce via headers test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata( [x.sha256 for x in blocks], timeout=5) # Test that duplicate headers won't result in duplicate # getdata requests (the check is further down) inv_node.send_header_for_blocks(blocks) inv_node.sync_with_ping() [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() inv_node.sync_with_ping() # This block should not be announced to the inv node (since it also # broadcast it) assert "inv" not in inv_node.last_message assert "headers" not in inv_node.last_message tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal( test_node.check_last_announcement(headers=[tip]), True) height += 1 block_time += 1 self.log.info("Part 2: success!") self.log.info( "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") # PART 3. Headers announcements can stop after large reorg, and resume after # getheaders or inv from peer. 
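# The numbers 7 and 8 in this part come from the node's announcement
# policy: at most 8 headers are ever announced directly
# (MAX_BLOCKS_TO_ANNOUNCE in the C++ code), so a reorg introducing more
# than 8 new blocks degrades to a bare inv of the tip. Sketch of the
# decision this part exercises (illustrative name):
#
#   use_headers_announcement = len(new_blocks_to_announce) <= 8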
for j in range(2): # First try mining a reorg that can propagate with header # announcement new_block_hashes = self.mine_reorg(length=7) tip = new_block_hashes[-1] assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal( test_node.check_last_announcement(headers=new_block_hashes), True) block_time += 8 # Mine a too-large reorg, which should be announced with a single # inv new_block_hashes = self.mine_reorg(length=8) tip = new_block_hashes[-1] assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(inv=[tip]), True) block_time += 9 fork_point = self.nodes[0].getblock( "%02x" % new_block_hashes[0])["previousblockhash"] fork_point = int(fork_point, 16) # Use getblocks/getdata test_node.send_getblocks(locator=[fork_point]) assert_equal( test_node.check_last_announcement(inv=new_block_hashes), True) test_node.get_data(new_block_hashes) test_node.wait_for_block(new_block_hashes[-1]) for i in range(3): # Mine another block, still should get only an inv tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal( test_node.check_last_announcement(inv=[tip]), True) if i == 0: # Just get the data -- shouldn't cause headers # announcements to resume test_node.get_data([tip]) test_node.wait_for_block(tip) elif i == 1: # Send a getheaders message that shouldn't trigger headers announcements # to resume (best header sent will be too old) test_node.get_headers(locator=[ fork_point], hashstop=new_block_hashes[1]) test_node.get_data([tip]) test_node.wait_for_block(tip) elif i == 2: test_node.get_data([tip]) test_node.wait_for_block(tip) # This time, try sending either a getheaders to trigger resumption # of headers announcements, or mine a new block and inv it, also # triggering resumption of headers announcements. if j == 0: test_node.get_headers(locator=[tip], hashstop=0) test_node.sync_with_ping() else: test_node.send_block_inv(tip) test_node.sync_with_ping() # New blocks should now be announced with header tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal( test_node.check_last_announcement(headers=[tip]), True) self.log.info("Part 3: success!") self.log.info("Part 4: Testing direct fetch behavior...") tip = self.mine_blocks(1) height = self.nodes[0].getblockcount() + 1 last_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] block_time = last_time + 1 # Create 2 blocks. Send the blocks, then send the headers. 
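# Context for the direct-fetch checks in this part: announced headers
# trigger an immediate getdata only when they connect to the known chain,
# carry at least as much work as our tip, and the peer's in-flight block
# budget (16, MAX_BLOCKS_IN_TRANSIT_PER_PEER in the C++ code) is not
# exhausted -- which is why 16 fork headers below yield a getdata for only
# 14 more blocks. A rough sketch (illustrative names):
#
#   will_direct_fetch = headers_connect and \
#       fork_work >= tip_work and blocks_in_flight < 16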
blocks = [] for b in range(2): blocks.append( create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 inv_node.send_message(msg_block(blocks[-1])) inv_node.sync_with_ping() # Make sure blocks are processed test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() # should not have received any getdata messages with mininode_lock: assert "getdata" not in test_node.last_message # This time, direct fetch should work blocks = [] for b in range(3): blocks.append( create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() test_node.wait_for_getdata( [x.sha256 for x in blocks], timeout=direct_fetch_response_time) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() # Now announce a header that forks the last two blocks tip = blocks[0].sha256 height -= 1 blocks = [] # Create extra blocks for later for b in range(20): blocks.append( create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Announcing one block on fork should not trigger direct fetch # (less work than tip) test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[0:1]) test_node.sync_with_ping() with mininode_lock: assert "getdata" not in test_node.last_message # Announcing one more block on fork should trigger direct fetch for # both blocks (same work as tip) test_node.send_header_for_blocks(blocks[1:2]) test_node.sync_with_ping() test_node.wait_for_getdata( [x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time) # Announcing 16 more headers should trigger direct fetch for 14 more # blocks test_node.send_header_for_blocks(blocks[2:18]) test_node.sync_with_ping() test_node.wait_for_getdata( [x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time) # Announcing 1 more header should not trigger any response test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[18:19]) test_node.sync_with_ping() with mininode_lock: assert "getdata" not in test_node.last_message self.log.info("Part 4: success!") # Now deliver all those blocks we announced. [test_node.send_message(msg_block(x)) for x in blocks] self.log.info("Part 5: Testing handling of unconnecting headers") # First we test that receipt of an unconnecting header doesn't prevent # chain sync. for i in range(10): test_node.last_message.pop("getdata", None) blocks = [] # Create two more blocks. for j in range(2): blocks.append( create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Send the header of the second block -> this won't connect. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[1]]) test_node.wait_for_getheaders(timeout=1) test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() assert_equal( int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) blocks = [] # Now we test that if we repeatedly don't send connecting headers, we # don't go into an infinite loop trying to get them to connect. 
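# Background for the constants below: each unconnecting header bumps a
# per-peer counter; every MAX_UNCONNECTING_HEADERS of them costs the peer
# 20 misbehavior points, and at the default banscore of 100 the node
# disconnects -- hence 5 * MAX_UNCONNECTING_HEADERS attempts before the
# expected disconnect. Sketch of that accounting (illustrative, not the
# node's exact code):
#
#   if unconnecting_headers % MAX_UNCONNECTING_HEADERS == 0:
#       misbehavior += 20   # 100 / 20 == 5 rounds to disconnection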
MAX_UNCONNECTING_HEADERS = 10 for j in range(MAX_UNCONNECTING_HEADERS + 1): blocks.append( create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 for i in range(1, MAX_UNCONNECTING_HEADERS): # Send a header that doesn't connect, check that we get a # getheaders. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i]]) test_node.wait_for_getheaders(timeout=1) # Next header will connect, should re-set our count: test_node.send_header_for_blocks([blocks[0]]) # Remove the first two entries (blocks[1] would connect): blocks = blocks[2:] # Now try to see how many unconnecting headers we can send # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS for i in range(5 * MAX_UNCONNECTING_HEADERS - 1): # Send a header that doesn't connect, check that we get a # getheaders. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i % len(blocks)]]) test_node.wait_for_getheaders(timeout=1) # Eventually this stops working. test_node.send_header_for_blocks([blocks[-1]]) # Should get disconnected test_node.wait_for_disconnect() self.log.info("Part 5: success!") # Finally, check that the inv node never received a getdata request, # throughout the test assert "getdata" not in inv_node.last_message if __name__ == '__main__': SendHeadersTest().main() diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py index 6dd413f9f..3ae33ca53 100755 --- a/test/functional/test_framework/comptool.py +++ b/test/functional/test_framework/comptool.py @@ -1,444 +1,444 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from .mininode import * from .blockstore import BlockStore, TxStore -from .util import p2p_port +from .util import p2p_port, wait_until import logging logger = logging.getLogger("TestFramework.comptool") ''' This is a tool for comparing two or more bitcoinds to each other using a script provided. To use, create a class that implements get_tests(), and pass it in as the test generator to TestManager. get_tests() should be a python generator that returns TestInstance objects. See below for definition. 
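A minimal generator, as a sketch (block_a and tx_b are assumed to be a
solved CBlock and a signed CTransaction built elsewhere in the test):

    def get_tests(self):
        # expect the block to be accepted as the new tip
        yield TestInstance([[block_a, True]])
        # expect the transaction to be rejected with reject code 16
        yield TestInstance([[tx_b, RejectResult(16)]], sync_every_tx=True)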
''' # TestNode behaves as follows: # Configure with a BlockStore and TxStore # on_inv: log the message but don't request # on_headers: log the chain tip # on_pong: update ping response map (for synchronization) # on_getheaders: provide headers via BlockStore # on_getdata: provide blocks via BlockStore global mininode_lock class RejectResult(): """Outcome that expects rejection of a transaction or block.""" def __init__(self, code, reason=b''): self.code = code self.reason = reason def match(self, other): if self.code != other.code: return False return other.reason.startswith(self.reason) def __repr__(self): return '%i:%s' % (self.code, self.reason or '*') class TestNode(NodeConnCB): def __init__(self, block_store, tx_store): super().__init__() self.conn = None self.bestblockhash = None self.block_store = block_store self.block_request_map = {} self.tx_store = tx_store self.tx_request_map = {} self.block_reject_map = {} self.tx_reject_map = {} # When the pingmap is non-empty we're waiting for # a response self.pingMap = {} self.lastInv = [] self.closed = False def on_close(self, conn): self.closed = True def add_connection(self, conn): self.conn = conn def on_headers(self, conn, message): if len(message.headers) > 0: best_header = message.headers[-1] best_header.calc_sha256() self.bestblockhash = best_header.sha256 def on_getheaders(self, conn, message): response = self.block_store.headers_for( message.locator, message.hashstop) if response is not None: conn.send_message(response) def on_getdata(self, conn, message): [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)] [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)] for i in message.inv: if i.type == 1: self.tx_request_map[i.hash] = True elif i.type == 2: self.block_request_map[i.hash] = True def on_inv(self, conn, message): self.lastInv = [x.hash for x in message.inv] def on_pong(self, conn, message): try: del self.pingMap[message.nonce] except KeyError: raise AssertionError( "Got pong for unknown ping [%s]" % repr(message)) def on_reject(self, conn, message): if message.message == b'tx': self.tx_reject_map[message.data] = RejectResult( message.code, message.reason) if message.message == b'block': self.block_reject_map[message.data] = RejectResult( message.code, message.reason) def send_inv(self, obj): mtype = 2 if isinstance(obj, CBlock) else 1 self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)])) def send_getheaders(self): # We ask for headers from their last tip. m = msg_getheaders() m.locator = self.block_store.get_locator(self.bestblockhash) self.conn.send_message(m) def send_header(self, header): m = msg_headers() m.headers.append(header) self.conn.send_message(m) # This assumes BIP31 def send_ping(self, nonce): self.pingMap[nonce] = True self.conn.send_message(msg_ping(nonce)) def received_ping_response(self, nonce): return nonce not in self.pingMap def send_mempool(self): self.lastInv = [] self.conn.send_message(msg_mempool()) # TestInstance: # # Instances of these are generated by the test generator, and fed into the # comptool. # # "blocks_and_transactions" should be an array of # [obj, True/False/None, hash/None]: # - obj is either a CBlock, CBlockHeader, or a CTransaction, and # - the second value indicates whether the object should be accepted # into the blockchain or mempool (for tests where we expect a certain # answer), or "None" if we don't expect a certain answer and are just # comparing the behavior of the nodes being tested. 
# - the third value is the hash to test the tip against (if None or omitted, # use the hash of the block) # - NOTE: if a block header, no test is performed; instead the header is # just added to the block_store. This is to facilitate block delivery # when communicating with headers-first clients (when withholding an # intermediate block). # sync_every_block: if True, then each block will be inv'ed, synced, and # nodes will be tested based on the outcome for the block. If False, # then inv's accumulate until all blocks are processed (or max inv size # is reached) and then sent out in one inv message. Then the final block # will be synced across all connections, and the outcome of the final # block will be tested. # sync_every_tx: analogous to behavior for sync_every_block, except if outcome # on the final tx is None, then contents of entire mempool are compared # across all connections. (If outcome of final tx is specified as true # or false, then only the last tx is tested against outcome.) class TestInstance(): def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False): self.blocks_and_transactions = objects if objects else [] self.sync_every_block = sync_every_block self.sync_every_tx = sync_every_tx class TestManager(): def __init__(self, testgen, datadir): self.test_generator = testgen self.connections = [] self.test_nodes = [] self.block_store = BlockStore(datadir) self.tx_store = TxStore(datadir) self.ping_counter = 1 def add_all_connections(self, nodes): for i in range(len(nodes)): # Create a p2p connection to each node test_node = TestNode(self.block_store, self.tx_store) self.test_nodes.append(test_node) self.connections.append( NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node)) # Make sure the TestNode (callback class) has a reference to its # associated NodeConn test_node.add_connection(self.connections[-1]) def clear_all_connections(self): self.connections = [] self.test_nodes = [] def wait_for_disconnections(self): def disconnected(): return all(node.closed for node in self.test_nodes) - return wait_until(disconnected, timeout=10) + wait_until(disconnected, timeout=10, lock=mininode_lock) def wait_for_verack(self): return all(node.wait_for_verack() for node in self.test_nodes) def wait_for_pings(self, counter): def received_pongs(): return all(node.received_ping_response(counter) for node in self.test_nodes) - return wait_until(received_pongs) + wait_until(received_pongs, lock=mininode_lock) # sync_blocks: Wait for all connections to request the blockhash given # then send get_headers to find out the tip of each node, and synchronize # the response by using a ping (and waiting for pong with same nonce). 
def sync_blocks(self, blockhash, num_blocks): def blocks_requested(): return all( blockhash in node.block_request_map and node.block_request_map[ blockhash] for node in self.test_nodes ) # --> error if not requested - if not wait_until(blocks_requested, attempts=20 * num_blocks): - raise AssertionError("Not all nodes requested block") + wait_until(blocks_requested, attempts=20 * + num_blocks, lock=mininode_lock) # Send getheaders message [c.cb.send_getheaders() for c in self.connections] # Send ping and wait for response -- synchronization hack [c.cb.send_ping(self.ping_counter) for c in self.connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 # Analogous to sync_block (see above) def sync_transaction(self, txhash, num_events): # Wait for nodes to request transaction (50ms sleep * 20 tries * # num_events) def transaction_requested(): return all( txhash in node.tx_request_map and node.tx_request_map[txhash] for node in self.test_nodes ) # --> error if not requested - if not wait_until(transaction_requested, attempts=20 * num_events): - raise AssertionError("Not all nodes requested transaction") + wait_until(transaction_requested, attempts=20 * + num_events, lock=mininode_lock) # Get the mempool [c.cb.send_mempool() for c in self.connections] # Send ping and wait for response -- synchronization hack [c.cb.send_ping(self.ping_counter) for c in self.connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 # Sort inv responses from each node with mininode_lock: [c.cb.lastInv.sort() for c in self.connections] # Verify that the tip of each connection all agree with each other, and # with the expected outcome (if given) def check_results(self, blockhash, outcome): with mininode_lock: for c in self.connections: if outcome is None: if c.cb.bestblockhash != self.connections[0].cb.bestblockhash: return False # Check that block was rejected w/ code elif isinstance(outcome, RejectResult): if c.cb.bestblockhash == blockhash: return False if blockhash not in c.cb.block_reject_map: logger.error( 'Block not in reject map: %064x' % (blockhash)) return False if not outcome.match(c.cb.block_reject_map[blockhash]): logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)) return False elif ((c.cb.bestblockhash == blockhash) != outcome): return False return True # Either check that the mempools all agree with each other, or that # txhash's presence in the mempool matches the outcome specified. # This is somewhat of a strange comparison, in that we're either comparing # a particular tx to an outcome, or the entire mempools altogether; # perhaps it would be useful to add the ability to check explicitly that # a particular tx's existence in the mempool is the same across all nodes. 
def check_mempool(self, txhash, outcome): with mininode_lock: for c in self.connections: if outcome is None: # Make sure the mempools agree with each other if c.cb.lastInv != self.connections[0].cb.lastInv: return False # Check that tx was rejected w/ code elif isinstance(outcome, RejectResult): if txhash in c.cb.lastInv: return False if txhash not in c.cb.tx_reject_map: logger.error('Tx not in reject map: %064x' % (txhash)) return False if not outcome.match(c.cb.tx_reject_map[txhash]): logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)) return False elif ((txhash in c.cb.lastInv) != outcome): return False return True def run(self): # Wait until verack is received self.wait_for_verack() test_number = 1 for test_instance in self.test_generator.get_tests(): # We use these variables to keep track of the last block # and last transaction in the tests, which are used # if we're not syncing on every block or every tx. [block, block_outcome, tip] = [None, None, None] [tx, tx_outcome] = [None, None] invqueue = [] for test_obj in test_instance.blocks_and_transactions: b_or_t = test_obj[0] outcome = test_obj[1] # Determine if we're dealing with a block or tx if isinstance(b_or_t, CBlock): # Block test runner block = b_or_t block_outcome = outcome tip = block.sha256 # each test_obj can have an optional third argument # to specify the tip we should compare with # (default is to use the block being tested) if len(test_obj) >= 3: tip = test_obj[2] # Add to shared block_store, set as current block # If there was an open getdata request for the block # previously, and we didn't have an entry in the # block_store, then immediately deliver, because the # node wouldn't send another getdata request while # the earlier one is outstanding. first_block_with_hash = True if self.block_store.get(block.sha256) is not None: first_block_with_hash = False with mininode_lock: self.block_store.add_block(block) for c in self.connections: if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True: # There was a previous request for this block hash # Most likely, we delivered a header for this block # but never had the block to respond to the # getdata c.send_message(msg_block(block)) else: c.cb.block_request_map[block.sha256] = False # Either send inv's to each node and sync, or add # to invqueue for later inv'ing. if (test_instance.sync_every_block): # if we expect success, send inv and sync every block # if we expect failure, just push the block and see # what happens. 
if outcome == True: [c.cb.send_inv(block) for c in self.connections] self.sync_blocks(block.sha256, 1) else: [c.send_message(msg_block(block)) for c in self.connections] [c.cb.send_ping(self.ping_counter) for c in self.connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 if (not self.check_results(tip, outcome)): raise AssertionError( "Test failed at test %d" % test_number) else: invqueue.append(CInv(2, block.sha256)) elif isinstance(b_or_t, CBlockHeader): block_header = b_or_t self.block_store.add_header(block_header) [c.cb.send_header(block_header) for c in self.connections] else: # Tx test runner assert(isinstance(b_or_t, CTransaction)) tx = b_or_t tx_outcome = outcome # Add to shared tx store and clear map entry with mininode_lock: self.tx_store.add_transaction(tx) for c in self.connections: c.cb.tx_request_map[tx.sha256] = False # Again, either inv to all nodes or save for later if (test_instance.sync_every_tx): [c.cb.send_inv(tx) for c in self.connections] self.sync_transaction(tx.sha256, 1) if (not self.check_mempool(tx.sha256, outcome)): raise AssertionError( "Test failed at test %d" % test_number) else: invqueue.append(CInv(1, tx.sha256)) # Ensure we're not overflowing the inv queue if len(invqueue) == MAX_INV_SZ: [c.send_message(msg_inv(invqueue)) for c in self.connections] invqueue = [] # Do final sync if we weren't syncing on every block or every tx. if (not test_instance.sync_every_block and block is not None): if len(invqueue) > 0: [c.send_message(msg_inv(invqueue)) for c in self.connections] invqueue = [] self.sync_blocks( block.sha256, len(test_instance.blocks_and_transactions)) if (not self.check_results(tip, block_outcome)): raise AssertionError( "Block test failed at test %d" % test_number) if (not test_instance.sync_every_tx and tx is not None): if len(invqueue) > 0: [c.send_message(msg_inv(invqueue)) for c in self.connections] invqueue = [] self.sync_transaction( tx.sha256, len(test_instance.blocks_and_transactions)) if (not self.check_mempool(tx.sha256, tx_outcome)): raise AssertionError( "Mempool test failed at test %d" % test_number) logger.info("Test %d: PASS" % test_number) test_number += 1 [c.disconnect_node() for c in self.connections] self.wait_for_disconnections() self.block_store.close() self.tx_store.close() diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 5d993e0d1..510d23c75 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -1,1948 +1,1930 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # mininode.py - Bitcoin P2P network half-a-node # # This python code was modified from ArtForz' public domain half-a-node, as # found in the mini-node branch of http://github.com/jgarzik/pynode. 
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
#             callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
#     data structures that should map to corresponding structures in
#     bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
#     data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization

import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread

from test_framework.siphash import siphash256
from test_framework.cdefs import MAX_BLOCK_SIGOPS_PER_MB
-from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
+from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until

BIP0031_VERSION = 60000
MY_VERSION = 70014  # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
# from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MY_RELAY = 1

MAX_INV_SZ = 50000

COIN = 100000000  # 1 btc in satoshis

NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_XTHIN = (1 << 4)
NODE_BITCOIN_CASH = (1 << 5)

# How much data will be read from the network at once
READ_BUFFER_SIZE = 8192

logger = logging.getLogger("TestFramework.mininode")

# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()

# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()

# Serialization/deserialization tools


def sha256(s):
    return hashlib.new('sha256', s).digest()


def ripemd160(s):
    return hashlib.new('ripemd160', s).digest()


def hash256(s):
    return sha256(sha256(s))


def ser_compact_size(l):
    r = b""
    if l < 253:
        r = struct.pack("B", l)
    elif l < 0x10000:
        r = struct.pack("<BH", 253, l)
    elif l < 0x100000000:
        r = struct.pack("<BI", 254, l)
    else:
        r = struct.pack("<BQ", 255, l)
    return r


def deser_compact_size(f):
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    return nit


def deser_string(f):
    nit = deser_compact_size(f)
    return f.read(nit)


def ser_string(s):
    return ser_compact_size(len(s)) + s


def deser_uint256(f):
    r = 0
    for i in range(8):
        t = struct.unpack("<I", f.read(4))[0]
        r += t << (i * 32)
    return r


def ser_uint256(u):
    rs = b""
    for i in range(8):
        rs += struct.pack("<I", u & 0xFFFFFFFF)
        u >>= 32
    return rs


def uint256_from_str(s):
    r = 0
    t = struct.unpack("<IIIIIIII", s[:32])
    for i in range(8):
        r += t[i] << (i * 32)
    return r


def uint256_from_compact(c):
    nbytes = (c >> 24) & 0xFF
    v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
    return v


def deser_vector(f, c):
    nit = deser_compact_size(f)
    r = []
    for i in range(nit):
        t = c()
        t.deserialize(f)
        r.append(t)
    return r
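# For illustration only (not part of the patch): a quick round-trip through
# the compact-size helpers above. Values below 253 occupy a single byte;
# larger values get a 3-, 5-, or 9-byte encoding:
#
#   from io import BytesIO
#   assert ser_compact_size(252) == b"\xfc"
#   assert ser_compact_size(253) == b"\xfd\xfd\x00"
#   assert deser_compact_size(BytesIO(ser_compact_size(50000))) == 50000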
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
    r = ser_compact_size(len(l))
    for i in l:
        if ser_function_name:
            r += getattr(i, ser_function_name)()
        else:
            r += i.serialize()
    return r


def deser_uint256_vector(f):
    nit = deser_compact_size(f)
    r = []
    for i in range(nit):
        t = deser_uint256(f)
        r.append(t)
    return r


def ser_uint256_vector(l):
    r = ser_compact_size(len(l))
    for i in l:
        r += ser_uint256(i)
    return r


def deser_string_vector(f):
    nit = deser_compact_size(f)
    r = []
    for i in range(nit):
        t = deser_string(f)
        r.append(t)
    return r


def ser_string_vector(l):
    r = ser_compact_size(len(l))
    for sv in l:
        r += ser_string(sv)
    return r


def deser_int_vector(f):
    nit = deser_compact_size(f)
    r = []
    for i in range(nit):
        t = struct.unpack("<i", f.read(4))[0]
        r.append(t)
    return r


def ser_int_vector(l):
    r = ser_compact_size(len(l))
    for i in l:
        r += struct.pack("<i", i)
    return r


# Objects that map to bitcoind objects, which can be serialized/deserialized

class CAddress():
    def __init__(self):
        self.nServices = 1
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f):
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        self.port = struct.unpack(">H", f.read(2))[0]

    def serialize(self):
        r = b""
        r += struct.pack("<Q", self.nServices)
        r += self.pchReserved
        r += socket.inet_aton(self.ip)
        r += struct.pack(">H", self.port)
        return r

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)


MSG_WITNESS_FLAG = 1 << 30


class CInv():
    typemap = {
        0: "Error",
        1: "TX",
        2: "Block",
        1 | MSG_WITNESS_FLAG: "WitnessTx",
        2 | MSG_WITNESS_FLAG: "WitnessBlock",
        4: "CompactBlock"
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        self.type = struct.unpack("<i", f.read(4))[0]
        self.hash = deser_uint256(f)

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.type)
        r += ser_uint256(self.hash)
        return r

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)

# [elided in this excerpt: CBlockLocator, COutPoint, CTxIn, CTxOut, the
#  witness classes, and the body of CTransaction (__init__, deserialize,
#  serialize, rehash/calc_sha256); only the tail of CTransaction survives]


class CTransaction():
    def is_valid(self):
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True

    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)


class CBlockHeader():
    def __init__(self, header=None):
        if header is None:
            self.set_null()
        else:
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()

    def set_null(self):
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        self.sha256 = None
        self.hash = None

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r

    def calc_sha256(self):
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')

    def rehash(self):
        self.sha256 = None
        self.calc_sha256()
        return self.sha256

    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)


class CBlock(CBlockHeader):
    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []

    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)

    def serialize(self):
        r = b""
        r += super(CBlock, self).serialize()
        r += ser_vector(self.vtx)
        return r

    def get_merkle_root(self, hashes):
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i + 1, len(hashes) - 1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])

    def calc_merkle_root(self):
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)

    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]

        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))

        return self.get_merkle_root(hashes)

    def is_valid(self):
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True

    def solve(self):
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()

    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
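# For illustration only (not part of the patch): how tests typically use the
# classes above to mine a block. On regtest the compact target in nBits is
# permissive enough that solve() returns after a handful of nonce bumps. The
# names tip_sha256/coinbase_tx are placeholders for values the caller holds:
#
#   block = CBlock()
#   block.nBits = 0x207fffff               # regtest proof-of-work limit
#   block.hashPrevBlock = tip_sha256
#   block.vtx.append(coinbase_tx)
#   block.hashMerkleRoot = block.calc_merkle_root()
#   block.solve()                          # bumps nNonce until sha256 <= target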
self.strStatusBar = b"" self.strReserved = b"" def deserialize(self, f): self.nVersion = struct.unpack("= 106: self.addrFrom = CAddress() self.addrFrom.deserialize(f) self.nNonce = struct.unpack("= 209: self.nStartingHeight = struct.unpack("= 70001: # Relay field is optional for version 70001 onwards try: self.nRelay = struct.unpack(" class msg_headers(): command = b"headers" def __init__(self): self.headers = [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return "msg_headers(headers=%s)" % repr(self.headers) class msg_reject(): command = b"reject" REJECT_MALFORMED = 1 def __init__(self): self.message = b"" self.code = 0 self.reason = b"" self.data = 0 def deserialize(self, f): self.message = deser_string(f) self.code = struct.unpack(" BIP0031_VERSION: conn.send_message(msg_pong(message.nonce)) def on_verack(self, conn, message): conn.ver_recv = conn.ver_send self.verack_received = True def on_version(self, conn, message): if message.nVersion >= 209: conn.send_message(msg_verack()) conn.ver_send = min(MY_VERSION, message.nVersion) if message.nVersion < 209: conn.ver_recv = conn.ver_send conn.nServices = message.nServices # Connection helper methods def add_connection(self, conn): self.connection = conn def wait_for_disconnect(self, timeout=60): def test_function(): return not self.connected - assert wait_until(test_function, timeout=timeout) + wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message receiving helper methods def wait_for_block(self, blockhash, timeout=60): def test_function(): return self.last_message.get( "block") and self.last_message["block"].block.rehash() == blockhash - assert wait_until(test_function, timeout=timeout) + wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getdata(self, timeout=60): def test_function(): return self.last_message.get("getdata") - assert wait_until(test_function, timeout=timeout) + wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getheaders(self, timeout=60): def test_function(): return self.last_message.get("getheaders") - assert wait_until(test_function, timeout=timeout) + wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( "wait_for_inv() will only verify the first inv object") def test_function(): return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash - assert wait_until(test_function, timeout=timeout) + wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_verack(self, timeout=60): def test_function(): return self.message_count["verack"] - assert wait_until(test_function, timeout=timeout) + wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message sending helper functions def send_message(self, message): if self.connection: self.connection.send_message(message) else: logger.error("Cannot send message. 
No connection to node!") def send_and_ping(self, message): self.send_message(message) self.sync_with_ping() # Sync up with the node def sync_with_ping(self, timeout=60): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): return self.last_message.get( "pong") and self.last_message["pong"].nonce == self.ping_counter - assert wait_until(test_function, timeout=timeout) + wait_until(test_function, timeout=timeout, lock=mininode_lock) self.ping_counter += 1 # The actual NodeConn class # This class provides an interface for a p2p connection to a specified node class NodeConn(asyncore.dispatcher): messagemap = { b"version": msg_version, b"verack": msg_verack, b"addr": msg_addr, b"alert": msg_alert, b"inv": msg_inv, b"getdata": msg_getdata, b"getblocks": msg_getblocks, b"tx": msg_tx, b"block": msg_block, b"getaddr": msg_getaddr, b"ping": msg_ping, b"pong": msg_pong, b"headers": msg_headers, b"getheaders": msg_getheaders, b"reject": msg_reject, b"mempool": msg_mempool, b"feefilter": msg_feefilter, b"sendheaders": msg_sendheaders, b"sendcmpct": msg_sendcmpct, b"cmpctblock": msg_cmpctblock, b"getblocktxn": msg_getblocktxn, b"blocktxn": msg_blocktxn } MAGIC_BYTES = { "mainnet": b"\xe3\xe1\xf3\xe8", "testnet3": b"\xf4\xe5\xf3\xf4", "regtest": b"\xda\xb5\xbf\xfa", } def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True): asyncore.dispatcher.__init__(self, map=mininode_socket_map) self.dstaddr = dstaddr self.dstport = dstport self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.sendbuf = b"" self.recvbuf = b"" self.ver_send = 209 self.ver_recv = 209 self.last_sent = 0 self.state = "connecting" self.network = net self.cb = callback self.disconnect = False self.nServices = 0 if send_version: # stuff version msg into sendbuf vt = msg_version() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 self.send_message(vt, True) logger.info('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport)) try: self.connect((dstaddr, dstport)) except: self.handle_close() self.rpc = rpc def handle_connect(self): if self.state != "connected": logger.debug("Connected & Listening: \n") self.state = "connected" self.cb.on_open(self) def handle_close(self): logger.debug("Closing Connection to %s:%d... 
" % (self.dstaddr, self.dstport)) self.state = "closed" self.recvbuf = b"" self.sendbuf = b"" try: self.close() except: pass self.cb.on_close(self) def handle_read(self): try: with mininode_lock: t = self.recv(READ_BUFFER_SIZE) if len(t) > 0: self.recvbuf += t except: pass while True: msg = self.got_data() if msg == None: break self.got_message(msg) def readable(self): return True def writable(self): with mininode_lock: pre_connection = self.state == "connecting" length = len(self.sendbuf) return (length > 0 or pre_connection) def handle_write(self): with mininode_lock: # asyncore does not expose socket connection, only the first read/write # event, thus we must check connection manually here to know when we # actually connect if self.state == "connecting": self.handle_connect() if not self.writable(): return try: sent = self.send(self.sendbuf) except: self.handle_close() return self.sendbuf = self.sendbuf[sent:] def got_data(self): try: with mininode_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]: raise ValueError("got garbage %s" % repr(self.recvbuf)) if self.ver_recv < 209: if len(self.recvbuf) < 4 + 12 + 4: return None command = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0] msglen = struct.unpack( "= 209: th = sha256(data) h = sha256(th) tmsg += h[:4] tmsg += data with mininode_lock: self.sendbuf += tmsg self.last_sent = time.time() def got_message(self, message): if message.command == b"version": if message.nVersion <= BIP0031_VERSION: self.messagemap[b'ping'] = msg_ping_prebip31 if self.last_sent + 30 * 60 < time.time(): self.send_message(self.messagemap[b'ping']()) logger.debug("Received message from %s:%d: %s" % (self.dstaddr, self.dstport, repr(message))) self.cb.deliver(self, message) def disconnect_node(self): self.disconnect = True class NetworkThread(Thread): def run(self): while mininode_socket_map: # We check for whether to disconnect outside of the asyncore # loop to workaround the behavior of asyncore when using # select disconnected = [] for fd, obj in mininode_socket_map.items(): if obj.disconnect: disconnected.append(obj) [obj.handle_close() for obj in disconnected] asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) # An exception we can raise if we detect a potential disconnect # (p2p or rpc) before the test is complete class EarlyDisconnectError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index d5dc98889..02e1c86b8 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -1,680 +1,703 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from base64 import b64encode from binascii import hexlify, unhexlify from decimal import Decimal, ROUND_DOWN import json import logging import os import random import re from subprocess import CalledProcessError import time from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException logger = logging.getLogger("TestFramework.utils") # Assert functions ################## def assert_fee_amount(fee, tx_size, fee_per_kB): """Assert the fee was in range""" target_fee = tx_size * fee_per_kB / 1000 if fee < target_fee: raise AssertionError( "Fee of %s BTC too low! 
(Should be %s BTC)" % (str(fee), str(target_fee))) # allow the wallet's estimation to be at most 2 bytes off if fee > (tx_size + 2) * fee_per_kB / 1000: raise AssertionError( "Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee))) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) def assert_greater_than_or_equal(thing1, thing2): if thing1 < thing2: raise AssertionError("%s < %s" % (str(thing1), str(thing2))) def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) def assert_raises_message(exc, message, fun, *args, **kwds): try: fun(*args, **kwds) except exc as e: if message is not None and message not in e.error['message']: raise AssertionError( "Expected substring not found:" + e.error['message']) except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_raises_process_error(returncode, output, fun, *args, **kwds): """Execute a process and asserts the process return code and output. Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError and verifies that the return code and output are as expected. Throws AssertionError if no CalledProcessError was raised or if the return code and output are not as expected. Args: returncode (int): the process return code. output (string): [a substring of] the process output. fun (function): the function to call. This should execute a process. args*: positional arguments for the function. kwds**: named arguments for the function. """ try: fun(*args, **kwds) except CalledProcessError as e: if returncode != e.returncode: raise AssertionError("Unexpected returncode %i" % e.returncode) if output not in e.output: raise AssertionError("Expected substring not found:" + e.output) else: raise AssertionError("No exception raised") def assert_raises_jsonrpc(code, message, fun, *args, **kwds): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException and verifies that the error code and message are as expected. Throws AssertionError if no JSONRPCException was raised or if the error code/message are not as expected. Args: code (int), optional: the error code returned by the RPC call (defined in src/rpc/protocol.h). Set to None if checking the error code is not required. message (string), optional: [a substring of] the error string returned by the RPC call. Set to None if checking the error string is not required. fun (function): the function to call. This should be the name of an RPC. args*: positional arguments for the function. kwds**: named arguments for the function. """ try: fun(*args, **kwds) except JSONRPCException as e: # JSONRPCException was thrown as expected. Check the code and message values are correct. 
if (code is not None) and (code != e.error["code"]): raise AssertionError( "Unexpected JSONRPC error code %i" % e.error["code"]) if (message is not None) and (message not in e.error['message']): raise AssertionError( "Expected substring not found:" + e.error['message']) except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_is_hex_string(string): try: int(string, 16) except Exception as e: raise AssertionError( "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e)) def assert_is_hash_string(string, length=64): if not isinstance(string, str): raise AssertionError("Expected a string, got type %r" % type(string)) elif length and len(string) != length: raise AssertionError( "String of length %d expected; got %d" % (length, len(string))) elif not re.match('[abcdef0-9]+$', string): raise AssertionError( "String %r contains invalid characters for a hash." % string) def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. If the should_not_find flag is true, to_match should not be found in object_array """ if should_not_find: assert_equal(expected, {}) num_matched = 0 for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue elif should_not_find: num_matched = num_matched + 1 for key, value in expected.items(): if item[key] != value: raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value))) num_matched = num_matched + 1 if num_matched == 0 and not should_not_find: raise AssertionError("No objects matched %s" % (str(to_match))) if num_matched > 0 and should_not_find: raise AssertionError("Objects were found %s" % (str(to_match))) # Utility functions ################### def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) def bytes_to_hex_str(byte_str): return hexlify(byte_str).decode('ascii') def hex_str_to_bytes(hex_str): return unhexlify(hex_str.encode('ascii')) def str_to_b64str(string): return b64encode(string.encode('utf-8')).decode('ascii') def satoshi_round(amount): return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) + +def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None): + if attempts == float('inf') and timeout == float('inf'): + timeout = 60 + attempt = 0 + timeout += time.time() + + while attempt < attempts and time.time() < timeout: + if lock: + with lock: + if predicate(): + return + else: + if predicate(): + return + attempt += 1 + time.sleep(0.05) + + # Print the cause of the timeout + assert_greater_than(attempts, attempt) + assert_greater_than(timeout, time.time()) + raise RuntimeError('Unreachable') + # RPC/P2P connection constants and functions ############################################ # The maximum number of nodes a single test can spawn MAX_NODES = 8 # Don't assign rpc or p2p ports lower than this PORT_MIN = 11000 # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 class PortSeed: # Must be initialized with 
a unique integer for each process n = None def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None): """ Args: url (str): URL of the RPC server to call node_number (int): the node number (or id) that this calls to Kwargs: timeout (int): HTTP timeout in seconds Returns: AuthServiceProxy. convenience object for making RPC calls. """ proxy_kwargs = {} if timeout is not None: proxy_kwargs['timeout'] = timeout proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info coverage_logfile = coverage.get_filename( coveragedir, node_number) if coveragedir else None return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) def p2p_port(n): assert(n <= MAX_NODES) return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_port(n): return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_url(datadir, i, rpchost=None): rpc_u, rpc_p = get_auth_cookie(datadir) host = '127.0.0.1' port = rpc_port(i) if rpchost: parts = rpchost.split(':') if len(parts) == 2: host, port = parts else: host = rpchost return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) # Node functions ################ def initialize_datadir(dirname, n): datadir = os.path.join(dirname, "node" + str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: f.write("regtest=1\n") f.write("port=" + str(p2p_port(n)) + "\n") f.write("rpcport=" + str(rpc_port(n)) + "\n") f.write("listenonion=0\n") f.write("usecashaddr=1\n") return datadir def get_datadir_path(dirname, n): return os.path.join(dirname, "node" + str(n)) def get_auth_cookie(datadir): user = None password = None if os.path.isfile(os.path.join(datadir, "bitcoin.conf")): with open(os.path.join(datadir, "bitcoin.conf"), 'r') as f: for line in f: if line.startswith("rpcuser="): assert user is None # Ensure that there is only one rpcuser line user = line.split("=")[1].strip("\n") if line.startswith("rpcpassword="): assert password is None # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f: userpass = f.read() split_userpass = userpass.split(':') user = split_userpass[0] password = split_userpass[1] if user is None or password is None: raise ValueError("No RPC credentials") return user, password def log_filename(dirname, n_node, logname): return os.path.join(dirname, "node" + str(n_node), "regtest", logname) def get_bip9_status(node, key): info = node.getblockchaininfo() return info['bip9_softforks'][key] def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def disconnect_nodes(from_connection, node_num): for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]: from_connection.disconnectnode(nodeid=peer_id) for _ in range(50): if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []: break time.sleep(0.1) else: raise AssertionError("timed out waiting for disconnect") def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:" + str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): time.sleep(0.1) def 
connect_nodes_bi(nodes, a, b):
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)


def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.

    sync_blocks needs to be called with an rpc_connections set that has at
    least one node already synced to the latest, stable tip, otherwise
    there's a chance it might return before all nodes are stably synced.
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        tips = [r.waitforblockheight(maxheight, int(wait * 1000))
                for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n {!r}".format(tip) for tip in tips)))


def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same best block
    """
    while timeout > 0:
        best_hash = [x.getbestblockhash() for x in rpc_connections]
        if best_hash == [best_hash[0]] * len(best_hash):
            return
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")


def sync_mempools(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same transactions in their memory pools
    """
    while timeout > 0:
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match + 1
        if num_match == len(rpc_connections):
            return
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Mempool sync failed")

# Transaction/Block functions
#############################


def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
""" txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert(confirmations_required >= 0) utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append( {"txid": t["txid"], "vout": t["vout"], "address": t["address"]}) if total_in < amount_needed: raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out + fee change = amount_in - amount if change > amount * 2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal( change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create&broadcast a zero-priority transaction. Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using its output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount + fee * 2) outputs = make_change(from_node, total_in, amount + fee, fee) outputs[self_address] = float(amount + fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransaction( self_rawtx, None, None, "ALL|FORKID") self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount + fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [{"txid": self_txid, "vout": vout}] outputs = {to_node.getnewaddress(): float(amount)} rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx, None, None, "ALL|FORKID") txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. 
Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (total_in, inputs) = gather_inputs(from_node, amount + fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx, None, None, "ALL|FORKID") txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) # Helper to create at least "count" utxos # Pass in a fee that is sufficient for relay and mining new transactions. def create_confirmed_utxos(fee, node, count, age=101): to_generate = int(0.5 * count) + age while to_generate > 0: node.generate(min(25, to_generate)) to_generate -= 25 utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for i in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} send_value = t['amount'] - fee outputs[addr1] = satoshi_round(send_value / 2) outputs[addr2] = satoshi_round(send_value / 2) raw_tx = node.createrawtransaction(inputs, outputs) signed_tx = node.signrawtransaction( raw_tx, None, None, "ALL|FORKID")["hex"] node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): node.generate(1) utxos = node.listunspent() assert(len(utxos) >= count) return utxos # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). def gen_return_txouts(): # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes for i in range(512): script_pubkey = script_pubkey + "01" # concatenate 128 txouts of above script_pubkey which we'll insert before # the txout for change txouts = "81" for k in range(128): # add txout value txouts = txouts + "0000000000000000" # add length of script_pubkey txouts = txouts + "fd0402" # add script_pubkey txouts = txouts + script_pubkey return txouts def create_tx(node, coinbase, to_address, amount): inputs = [{"txid": coinbase, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") assert_equal(signresult["complete"], True) return signresult["hex"] # Create a spend of each passed-in utxo, splicing in "txouts" to each raw # transaction to make it large. See gen_return_txouts() above. 
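# For illustration only (not part of the patch): why the splice below uses
# offsets 92 and 94. A one-input createrawtransaction result starts with
# 4 bytes version + 1 byte input count + 36 bytes outpoint + 1 byte empty
# scriptSig length + 4 bytes sequence = 46 bytes, i.e. 92 hex characters.
# The next byte (hex chars 92-94) is the output count, which gets replaced
# by the leading "81" (0x81 = 129) of the txouts blob: 128 OP_RETURN outputs
# plus the original change output.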
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        signresult = node.signrawtransaction(newtx, None, None, "NONE|FORKID")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids


def mine_large_block(node, utxos=None):
    # generate a 66k transaction,
    # and 14 of them are close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)


def get_srcdir(calling_script=None):
    """
    Try to find out the base folder containing the 'src' folder.
    If SRCDIR is set it does a sanity check and returns that.
    Otherwise it goes on a search and rescue mission.

    Returns None if it cannot find a suitable folder.

    TODO: This is only used for cdefs, consider moving that there.
    """
    def contains_src(path_to_check):
        if not path_to_check:
            return False
        else:
            cand_path = os.path.join(path_to_check, 'src')
            return os.path.exists(cand_path) and os.path.isdir(cand_path)

    srcdir = os.environ.get('SRCDIR', '')
    if contains_src(srcdir):
        return srcdir

    # If we have a caller, try to guess from its location where the
    # top level might be.
    if calling_script:
        caller_basedir = os.path.dirname(
            os.path.dirname(os.path.dirname(calling_script)))
        if caller_basedir != '' and contains_src(os.path.abspath(caller_basedir)):
            return os.path.abspath(caller_basedir)

    # Try to work it out based on the main module.
    # We might expect the caller to be rpc-tests.py or a test script
    # itself.
    import sys
    mainmod = sys.modules['__main__']
    mainmod_path = getattr(mainmod, '__file__', '')
    if mainmod_path and mainmod_path.endswith('.py'):
        maybe_top = os.path.dirname(
            os.path.dirname(os.path.dirname(mainmod_path)))
        if contains_src(os.path.abspath(maybe_top)):
            return os.path.abspath(maybe_top)

    # No luck, give up.
    return None
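# For illustration only (not part of the patch): how the new wait_until in
# this file is meant to be used. With neither attempts nor timeout supplied it
# defaults to a 60s timeout, polls the predicate every 50ms, and evaluates it
# under the given lock when one is passed (the mininode callsites pass
# lock=mininode_lock so predicates can safely read NodeConnCB state). On
# failure, the assert_greater_than calls report whether the attempt budget or
# the deadline was exhausted, replacing the old boolean-return contract:
#
#   wait_until(lambda: node.getblockcount() >= 200, timeout=30)
#   wait_until(blocks_requested, attempts=20 * num_blocks, lock=mininode_lock)
#
# Hand-rolled pollers such as sync_chain above could now collapse to a single
# predicate; a sketch (not a drop-in replacement, since the custom error
# message would be lost and the polling interval drops from 1s to 50ms):
#
#   def sync_chain(rpc_connections, *, timeout=60):
#       def same_tip():
#           hashes = [x.getbestblockhash() for x in rpc_connections]
#           return hashes.count(hashes[0]) == len(hashes)
#       wait_until(same_tip, timeout=timeout)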