diff --git a/test/functional/maxuploadtarget.py b/test/functional/maxuploadtarget.py --- a/test/functional/maxuploadtarget.py +++ b/test/functional/maxuploadtarget.py @@ -3,12 +3,6 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -import time -from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE - ''' Test behavior of -maxuploadtarget. @@ -18,61 +12,28 @@ if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. ''' +from collections import defaultdict +import time -# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending -# p2p messages to a node, generating the messages in the main testing logic. +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE class TestNode(NodeConnCB): def __init__(self): super().__init__() - self.connection = None - self.ping_counter = 1 - self.last_pong = msg_pong() - self.block_receive_map = {} - - def add_connection(self, conn): - self.connection = conn - self.peer_disconnected = False + self.block_receive_map = defaultdict(int) def on_inv(self, conn, message): pass - # Track the last getdata message we receive (used in the test) - def on_getdata(self, conn, message): - self.last_getdata = message - def on_block(self, conn, message): message.block.calc_sha256() - try: - self.block_receive_map[message.block.sha256] += 1 - except KeyError as e: - self.block_receive_map[message.block.sha256] = 1 - - # Spin until verack message is received from the node. - # We use this to signal that our test can begin. This - # is called from the testing thread, so it needs to acquire - # the global lock. 
- def wait_for_verack(self): - def veracked(): - return self.verack_received - return wait_until(veracked, timeout=10) - - def wait_for_disconnect(self): - def disconnected(): - return self.peer_disconnected - return wait_until(disconnected, timeout=10) - - # Wrapper for the NodeConn's send_message function - def send_message(self, message): - self.connection.send_message(message) - - def on_pong(self, conn, message): - self.last_pong = message - - def on_close(self, conn): - self.peer_disconnected = True + self.block_receive_map[message.block.sha256] += 1 + class MaxUploadTest(BitcoinTestFramework): @@ -201,37 +162,29 @@ self.nodes[0] = start_node(0, self.options.tmpdir, [ "-whitelist=127.0.0.1", "-maxuploadtarget=1"]) - # recreate/reconnect 3 test nodes - test_nodes = [] - connections = [] - - for i in range(3): - test_nodes.append(TestNode()) - connections.append( - NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i])) - test_nodes[i].add_connection(connections[i]) + # recreate/reconnect a test node + test_nodes = [TestNode()] + connections = [NodeConn('127.0.0.1', p2p_port( + 0), self.nodes[0], test_nodes[0])] + test_nodes[0].add_connection(connections[0]) - # Start up network handling in another thread - NetworkThread().start() - [x.wait_for_verack() for x in test_nodes] + NetworkThread().start() # Start up network handling in another thread + test_nodes[0].wait_for_verack() # retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(2, big_new_block)] for i in range(20): - test_nodes[1].send_message(getdata_request) - test_nodes[1].sync_with_ping() - assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1) + test_nodes[0].send_message(getdata_request) + test_nodes[0].sync_with_ping() + assert_equal(test_nodes[0].block_receive_map[big_new_block], i + 1) getdata_request.inv = [CInv(2, big_old_block)] - test_nodes[1].send_message(getdata_request) - test_nodes[1].wait_for_disconnect() + test_nodes[0].send_and_ping(getdata_request) # node is still connected because of the whitelist - assert_equal(len(self.nodes[0].getpeerinfo()), 3) + assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info( - "Peer 1 still connected after trying to download old block (whitelisted)") - - [c.disconnect_node() for c in connections] + "Peer still connected after trying to download old block (whitelisted)") if __name__ == '__main__': diff --git a/test/functional/p2p-acceptblock.py b/test/functional/p2p-acceptblock.py --- a/test/functional/p2p-acceptblock.py +++ b/test/functional/p2p-acceptblock.py @@ -56,41 +56,6 @@ Node0 should process and the tip should advance. ''' -# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending -# p2p messages to a node, generating the messages in the main testing logic. - - -class TestNode(NodeConnCB): - - def __init__(self): - super().__init__() - self.connection = None - self.ping_counter = 1 - self.last_pong = msg_pong() - - def add_connection(self, conn): - self.connection = conn - - # Track the last getdata message we receive (used in the test) - def on_getdata(self, conn, message): - self.last_getdata = message - - # Spin until verack message is received from the node. - # We use this to signal that our test can begin. This is called from the - # testing thread, so it needs to acquire the global lock. 
- def wait_for_verack(self): - while True: - with mininode_lock: - if self.verack_received: - return - time.sleep(0.05) - - # Wrapper for the NodeConn's send_message function - def send_message(self, message): - self.connection.send_message(message) - - def on_pong(self, conn, message): - self.last_pong = message class AcceptBlockTest(BitcoinTestFramework): @@ -113,8 +78,8 @@ def run_test(self): # Setup the p2p connections and start up the network thread. - test_node = TestNode() # connects to node0 (not whitelisted) - white_node = TestNode() # connects to node1 (whitelisted) + test_node = NodeConnCB() # connects to node0 (not whitelisted) + white_node = NodeConnCB() # connects to node1 (whitelisted) connections = [] connections.append( @@ -252,12 +217,12 @@ # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request - test_node.last_getdata = None + test_node.last_message.pop("getdata", None) test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) test_node.sync_with_ping() with mininode_lock: - getdata = test_node.last_getdata + getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) diff --git a/test/functional/p2p-compactblocks.py b/test/functional/p2p-compactblocks.py --- a/test/functional/p2p-compactblocks.py +++ b/test/functional/p2p-compactblocks.py @@ -8,7 +8,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment -from test_framework.siphash import siphash256 from test_framework.script import CScript, OP_TRUE ''' @@ -24,65 +23,32 @@ def __init__(self): super().__init__() self.last_sendcmpct = [] - self.last_headers = None - self.last_inv = None - self.last_cmpctblock = None self.block_announced = False - self.last_getdata = None - self.last_getheaders = None - self.last_getblocktxn = None - self.last_block = None - self.last_blocktxn = None # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. 
- self.set_announced_blockhashes = set() - self.connected = False - - def on_open(self, conn): - self.connected = True - - def on_close(self, conn): - self.connected = False + self.announced_blockhashes = set() def on_sendcmpct(self, conn, message): self.last_sendcmpct.append(message) - def on_block(self, conn, message): - self.last_block = message - def on_cmpctblock(self, conn, message): - self.last_cmpctblock = message self.block_announced = True - self.last_cmpctblock.header_and_shortids.header.calc_sha256() - self.set_announced_blockhashes.add( - self.last_cmpctblock.header_and_shortids.header.sha256) + self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() + self.announced_blockhashes.add( + self.last_message["cmpctblock"].header_and_shortids.header.sha256) def on_headers(self, conn, message): - self.last_headers = message self.block_announced = True - for x in self.last_headers.headers: + for x in self.last_message["headers"].headers: x.calc_sha256() - self.set_announced_blockhashes.add(x.sha256) + self.announced_blockhashes.add(x.sha256) def on_inv(self, conn, message): - self.last_inv = message - for x in self.last_inv.inv: + for x in self.last_message["inv"].inv: if x.type == 2: self.block_announced = True - self.set_announced_blockhashes.add(x.hash) - - def on_getdata(self, conn, message): - self.last_getdata = message - - def on_getheaders(self, conn, message): - self.last_getheaders = message - - def on_getblocktxn(self, conn, message): - self.last_getblocktxn = message - - def on_blocktxn(self, conn, message): - self.last_blocktxn = message + self.announced_blockhashes.add(x.hash) # Requires caller to hold mininode_lock def received_block_announcement(self): @@ -91,9 +57,9 @@ def clear_block_announcement(self): with mininode_lock: self.block_announced = False - self.last_inv = None - self.last_headers = None - self.last_cmpctblock = None + self.last_message.pop("inv", None) + self.last_message.pop("headers", None) + self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): msg = msg_getheaders() @@ -109,15 +75,14 @@ def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) - assert(wait_until(self.received_block_announcement, timeout=30)) - assert(self.received_block_announcement()) + assert wait_until(self.received_block_announcement, timeout=30) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): - return (block_hash in self.set_announced_blockhashes) + return (block_hash in self.announced_blockhashes) return wait_until(received_hash, timeout=timeout) def send_await_disconnect(self, message, timeout=30): @@ -213,16 +178,16 @@ with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( - block_hash, peer.last_cmpctblock, peer.last_inv)) + block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) # We shouldn't get any block announcements via cmpctblock yet. check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is None) + node, test_node, lambda p: "cmpctblock" not in p.last_message) # Try one more time, this time after requesting headers. 
test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None) + node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. @@ -235,7 +200,7 @@ sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is None) + node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) @@ -245,7 +210,7 @@ sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is None) + node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) @@ -255,30 +220,30 @@ sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is not None) + node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is not None) + node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is not None) + node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after sending a version-1, announce=false message. sendcmpct.version = preferred_version - 1 sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is not None) + node, test_node, lambda p: "cmpctblock" in p.last_message) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( - node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None) + node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) if old_node is not None: # Verify that a peer using an older protocol version can receive @@ -289,7 +254,7 @@ # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( - node, old_node, lambda p: p.last_cmpctblock is not None) + node, old_node, lambda p: "cmpctblock" in p.last_message) # This test actually causes bitcoind to (reasonably!) disconnect us, so do # this last. 
@@ -344,10 +309,10 @@ # Now fetch and check the compact block header_and_shortids = None with mininode_lock: - assert(test_node.last_cmpctblock is not None) + assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( - test_node.last_cmpctblock.header_and_shortids) + test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) @@ -363,10 +328,10 @@ # Now fetch and check the compact block header_and_shortids = None with mininode_lock: - assert(test_node.last_cmpctblock is not None) + assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( - test_node.last_cmpctblock.header_and_shortids) + test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) @@ -421,22 +386,23 @@ for announce in ["inv", "header"]: block = self.build_block_on_tip(node) with mininode_lock: - test_node.last_getdata = None + test_node.last_message.pop("getdata", None) if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) success = wait_until( - lambda: test_node.last_getheaders is not None, timeout=30) + lambda: "getheaders" in test_node.last_message, timeout=30) assert(success) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) success = wait_until( - lambda: test_node.last_getdata is not None, timeout=30) + lambda: "getdata" in test_node.last_message, timeout=30) assert(success) - assert_equal(len(test_node.last_getdata.inv), 1) - assert_equal(test_node.last_getdata.inv[0].type, 4) - assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) + assert_equal(len(test_node.last_message["getdata"].inv), 1) + assert_equal(test_node.last_message["getdata"].inv[0].type, 4) + assert_equal( + test_node.last_message["getdata"].inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() @@ -452,8 +418,8 @@ assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: - assert(test_node.last_getblocktxn is not None) - absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute( + assert("getblocktxn" in test_node.last_message) + absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [0]) # should be a coinbase request @@ -493,8 +459,8 @@ msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: - assert(peer.last_getblocktxn is not None) - absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute( + assert("getblocktxn" in peer.last_message) + absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, expected_result) @@ -568,7 +534,7 @@ # Clear out last request. 
with mininode_lock: - test_node.last_getblocktxn = None + test_node.last_message.pop("getblocktxn", None) # Send compact block comp_block.initialize_from_block( @@ -577,7 +543,7 @@ node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction - assert(test_node.last_getblocktxn is None) + assert("getblocktxn" not in test_node.last_message) # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. @@ -605,8 +571,8 @@ test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indexes = [] with mininode_lock: - assert(test_node.last_getblocktxn is not None) - absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute( + assert("getblocktxn" in test_node.last_message) + absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) @@ -630,12 +596,13 @@ # We should receive a getdata request success = wait_until( - lambda: test_node.last_getdata is not None, timeout=10) + lambda: "getdata" in test_node.last_message, timeout=10) assert(success) - assert_equal(len(test_node.last_getdata.inv), 1) - assert(test_node.last_getdata.inv[ - 0].type == 2 or test_node.last_getdata.inv[0].type == 2 | MSG_WITNESS_FLAG) - assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) + assert_equal(len(test_node.last_message["getdata"].inv), 1) + assert(test_node.last_message["getdata"].inv[0].type == + 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG) + assert_equal( + test_node.last_message["getdata"].inv[0].hash, block.sha256) # Deliver the block if version == 2: @@ -662,16 +629,16 @@ sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) success = wait_until( - lambda: test_node.last_blocktxn is not None, timeout=10) + lambda: "blocktxn" in test_node.last_message, timeout=10) assert(success) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: - assert_equal( - test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16)) + assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int( + block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: - tx = test_node.last_blocktxn.block_transactions.transactions.pop( + tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop( 0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) @@ -680,9 +647,9 @@ assert(tx.wit.is_null()) else: # Check that the witness matches - assert_equal( - tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) - test_node.last_blocktxn = None + assert_equal(tx.calc_sha256(True), + block.vtx[index].calc_sha256(True)) + test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the @@ -691,14 +658,14 @@ msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), [0]) with mininode_lock: - test_node.last_block = None - test_node.last_blocktxn = None + test_node.last_message.pop("block", None) + test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: - test_node.last_block.block.calc_sha256() + test_node.last_message["block"].block.calc_sha256() assert_equal( - test_node.last_block.block.sha256, int(block_hash, 16)) - assert_equal(test_node.last_blocktxn, None) + test_node.last_message["block"].block.sha256, int(block_hash, 16)) + assert 
"blocktxn" not in test_node.last_message def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. @@ -712,7 +679,7 @@ test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) success = wait_until( - lambda: test_node.last_cmpctblock is not None, timeout=30) + lambda: "cmpctblock" in test_node.last_message, timeout=30) assert(success) test_node.clear_block_announcement() @@ -720,15 +687,15 @@ wait_until(test_node.received_block_announcement, timeout=30) test_node.clear_block_announcement() with mininode_lock: - test_node.last_block = None + test_node.last_message.pop("block", None) test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) success = wait_until( - lambda: test_node.last_block is not None, timeout=30) + lambda: "block" in test_node.last_message, timeout=30) assert(success) with mininode_lock: - test_node.last_block.block.calc_sha256() + test_node.last_message["block"].block.calc_sha256() assert_equal( - test_node.last_block.block.sha256, int(new_blocks[0], 16)) + test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() @@ -755,10 +722,10 @@ msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: - test_node.last_blocktxn = None + test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: - assert(test_node.last_blocktxn is None) + assert "blocktxn" not in test_node.last_message def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) @@ -773,10 +740,11 @@ wait_until(lambda: l.received_block_announcement(), timeout=30) with mininode_lock: for l in listeners: - assert(l.last_cmpctblock is not None) - l.last_cmpctblock.header_and_shortids.header.calc_sha256() + assert "cmpctblock" in l.last_message + l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256( + ) assert_equal( - l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256) + l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. @@ -824,7 +792,7 @@ msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: - assert(peer.last_getblocktxn is not None) + assert "getblocktxn" in peer.last_message return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) diff --git a/test/functional/p2p-feefilter.py b/test/functional/p2p-feefilter.py --- a/test/functional/p2p-feefilter.py +++ b/test/functional/p2p-feefilter.py @@ -28,9 +28,6 @@ time.sleep(1) return False -# TestNode: bare-bones "peer". Used to track which invs are received from a node -# and to send the node feefilter messages. 
- class TestNode(NodeConnCB): def __init__(self): @@ -46,10 +43,6 @@ with mininode_lock: self.txinvs = [] - def send_filter(self, feerate): - self.send_message(msg_feefilter(feerate)) - self.sync_with_ping() - class FeeFilterTest(BitcoinTestFramework): @@ -81,7 +74,7 @@ test_node.clear_invs() # Set a filter of 15 sat/byte - test_node.send_filter(15000) + test_node.send_and_ping(msg_feefilter(15000)) # Test that txs are still being received (paying 20 sat/byte) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) @@ -108,7 +101,7 @@ test_node.clear_invs() # Remove fee filter and check that txs are received again - test_node.send_filter(0) + test_node.send_and_ping(msg_feefilter(0)) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, test_node)) diff --git a/test/functional/p2p-leaktests.py b/test/functional/p2p-leaktests.py --- a/test/functional/p2p-leaktests.py +++ b/test/functional/p2p-leaktests.py @@ -24,15 +24,8 @@ def __init__(self): super().__init__() - self.connection = None self.unexpected_msg = False - self.connected = False - - def add_connection(self, conn): - self.connection = conn - - def send_message(self, message): - self.connection.send_message(message) + self.ever_connected = False def bad_message(self, message): self.unexpected_msg = True @@ -40,6 +33,7 @@ def on_open(self, conn): self.connected = True + self.ever_connected = True def on_version(self, conn, message): self.bad_message(message) @@ -90,10 +84,6 @@ class CNodeNoVersionBan(CLazyNode): - - def __init__(self): - super().__init__() - # send a bunch of veracks without sending a message. This should get us disconnected. # NOTE: implementation-specific check here. Remove if bitcoind ban # behavior changes @@ -160,8 +150,12 @@ NetworkThread().start() # Start up network handling in another thread - assert( - wait_until(lambda: no_version_bannode.connected and no_version_idlenode.connected and no_verack_idlenode.version_received, timeout=10)) + assert wait_until( + lambda: no_version_bannode.ever_connected, timeout=10) + assert wait_until( + lambda: no_version_idlenode.ever_connected, timeout=10) + assert wait_until( + lambda: no_verack_idlenode.version_received, timeout=10) # Mine a block and make sure that it's not sent to the connected nodes self.nodes[0].generate(1) @@ -170,7 +164,7 @@ time.sleep(5) # This node should have been banned - assert(no_version_bannode.connection.state == "closed") + assert not no_version_bannode.connected [conn.disconnect_node() for conn in connections] diff --git a/test/functional/p2p-mempool.py b/test/functional/p2p-mempool.py --- a/test/functional/p2p-mempool.py +++ b/test/functional/p2p-mempool.py @@ -8,80 +8,27 @@ from test_framework.util import * -class TestNode(NodeConnCB): - - def __init__(self): - super().__init__() - self.connection = None - self.ping_counter = 1 - self.last_pong = msg_pong() - self.block_receive_map = {} - - def add_connection(self, conn): - self.connection = conn - self.peer_disconnected = False - - def on_inv(self, conn, message): - pass - - # Track the last getdata message we receive (used in the test) - def on_getdata(self, conn, message): - self.last_getdata = message - - def on_block(self, conn, message): - message.block.calc_sha256() - try: - self.block_receive_map[message.block.sha256] += 1 - except KeyError as e: - self.block_receive_map[message.block.sha256] = 1 - - # Spin until verack message is received from the node. - # We use this to signal that our test can begin. 
This - # is called from the testing thread, so it needs to acquire - # the global lock. - def wait_for_verack(self): - def veracked(): - return self.verack_received - return wait_until(veracked, timeout=10) - - def wait_for_disconnect(self): - def disconnected(): - return self.peer_disconnected - return wait_until(disconnected, timeout=10) - - # Wrapper for the NodeConn's send_message function - def send_message(self, message): - self.connection.send_message(message) - - def on_pong(self, conn, message): - self.last_pong = message - - def on_close(self, conn): - self.peer_disconnected = True - - def send_mempool(self): - self.lastInv = [] - self.send_message(msg_mempool()) - - class P2PMempoolTests(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [["-peerbloomfilters=0"]] + self.num_nodes = 2 + + def setup_network(self): + self.nodes = [start_node(0, self.options.tmpdir, [ + "-peerbloomfilters=0"])] def run_test(self): # connect a mininode - aTestNode = TestNode() + aTestNode = NodeConnCB() node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode) aTestNode.add_connection(node) NetworkThread().start() aTestNode.wait_for_verack() # request mempool - aTestNode.send_mempool() + aTestNode.send_message(msg_mempool()) aTestNode.wait_for_disconnect() # mininode must be disconnected at this point diff --git a/test/functional/p2p-timeouts.py b/test/functional/p2p-timeouts.py --- a/test/functional/p2p-timeouts.py +++ b/test/functional/p2p-timeouts.py @@ -29,20 +29,9 @@ class TestNode(NodeConnCB): - def __init__(self): - super().__init__() - self.connected = False - self.received_version = False - - def on_open(self, conn): - self.connected = True - - def on_close(self, conn): - self.connected = False - def on_version(self, conn, message): # Don't send a verack in response - self.received_version = True + pass class TimeoutsTest(BitcoinTestFramework): @@ -83,7 +72,7 @@ sleep(30) - assert(self.no_verack_node.received_version) + assert "version" in self.no_verack_node.last_message assert(self.no_verack_node.connected) assert(self.no_version_node.connected) diff --git a/test/functional/p2p-versionbits-warning.py b/test/functional/p2p-versionbits-warning.py --- a/test/functional/p2p-versionbits-warning.py +++ b/test/functional/p2p-versionbits-warning.py @@ -27,31 +27,10 @@ VB_UNKNOWN_BIT) VB_PATTERN = re.compile("^Warning.*versionbit") -# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending -# p2p messages to a node, generating the messages in the main testing logic. 
- - class TestNode(NodeConnCB): - - def __init__(self): - super().__init__() - self.connection = None - self.ping_counter = 1 - self.last_pong = msg_pong() - - def add_connection(self, conn): - self.connection = conn - def on_inv(self, conn, message): pass - # Wrapper for the NodeConn's send_message function - def send_message(self, message): - self.connection.send_message(message) - - def on_pong(self, conn, message): - self.last_pong = message - class VersionBitsWarningTest(BitcoinTestFramework): def __init__(self): diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py --- a/test/functional/sendheaders.py +++ b/test/functional/sendheaders.py @@ -83,23 +83,17 @@ direct_fetch_response_time = 0.05 -class BaseNode(NodeConnCB): +class TestNode(NodeConnCB): def __init__(self): super().__init__() - self.last_inv = None - self.last_headers = None - self.last_block = None - self.last_getdata = None self.block_announced = False - self.last_getheaders = None - self.disconnected = False self.last_blockhash_announced = None def clear_last_announcement(self): with mininode_lock: self.block_announced = False - self.last_inv = None - self.last_headers = None + self.last_message.pop("inv", None) + self.last_message.pop("headers", None) # Request data for a list of block hashes def get_data(self, block_hashes): @@ -120,29 +114,17 @@ self.connection.send_message(msg) def on_inv(self, conn, message): - self.last_inv = message self.block_announced = True self.last_blockhash_announced = message.inv[-1].hash def on_headers(self, conn, message): - self.last_headers = message if len(message.headers): self.block_announced = True message.headers[-1].calc_sha256() self.last_blockhash_announced = message.headers[-1].sha256 def on_block(self, conn, message): - self.last_block = message.block - self.last_block.calc_sha256() - - def on_getdata(self, conn, message): - self.last_getdata = message - - def on_getheaders(self, conn, message): - self.last_getheaders = message - - def on_close(self, conn): - self.disconnected = True + self.last_message["block"].calc_sha256() # Test whether the last announcement we received had the # right header or the right inv @@ -158,44 +140,29 @@ success = True compare_inv = [] - if self.last_inv != None: - compare_inv = [x.hash for x in self.last_inv.inv] + if "inv" in self.last_message: + compare_inv = [x.hash for x in self.last_message["inv"].inv] if compare_inv != expect_inv: success = False hash_headers = [] - if self.last_headers != None: + if "headers" in self.last_message: # treat headers as a list of block hashes - hash_headers = [x.sha256 for x in self.last_headers.headers] + hash_headers = [ + x.sha256 for x in self.last_message["headers"].headers] if hash_headers != expect_headers: success = False - self.last_inv = None - self.last_headers = None + self.last_message.pop("inv", None) + self.last_message.pop("headers", None) return success - # Syncing helpers - def wait_for_block(self, blockhash, timeout=60): - def test_function(): return self.last_block != None and self.last_block.sha256 == blockhash - assert(wait_until(test_function, timeout=timeout)) - return - - def wait_for_getheaders(self, timeout=60): - def test_function(): return self.last_getheaders != None - assert(wait_until(test_function, timeout=timeout)) - return - def wait_for_getdata(self, hash_list, timeout=60): if hash_list == []: return - def test_function(): return self.last_getdata != None and [ - x.hash for x in self.last_getdata.inv] == hash_list - assert(wait_until(test_function, 
timeout=timeout)) - return - - def wait_for_disconnect(self, timeout=60): - def test_function(): return self.disconnected + def test_function(): return "getdata" in self.last_message and [ + x.hash for x in self.last_message["getdata"].inv] == hash_list assert(wait_until(test_function, timeout=timeout)) return @@ -214,23 +181,6 @@ getblocks_message.locator.vHave = locator self.send_message(getblocks_message) -# InvNode: This peer should only ever receive inv's, because it doesn't ever send a -# "sendheaders" message. - - -class InvNode(BaseNode): - - def __init__(self): - BaseNode.__init__(self) - -# TestNode: This peer is the one we use for most of the testing. - - -class TestNode(BaseNode): - - def __init__(self): - BaseNode.__init__(self) - class SendHeadersTest(BitcoinTestFramework): @@ -271,7 +221,7 @@ def run_test(self): # Setup the p2p connections and start up the network thread. - inv_node = InvNode() + inv_node = TestNode() test_node = TestNode() self.p2p_connections = [inv_node, test_node] @@ -390,8 +340,8 @@ inv_node.sync_with_ping() # This block should not be announced to the inv node (since it also # broadcast it) - assert_equal(inv_node.last_inv, None) - assert_equal(inv_node.last_headers, None) + assert "inv" not in inv_node.last_message + assert "headers" not in inv_node.last_message tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal( @@ -494,12 +444,12 @@ inv_node.send_message(msg_block(blocks[-1])) inv_node.sync_with_ping() # Make sure blocks are processed - test_node.last_getdata = None + test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() # should not have received any getdata messages with mininode_lock: - assert_equal(test_node.last_getdata, None) + assert "getdata" not in test_node.last_message # This time, direct fetch should work blocks = [] @@ -536,11 +486,11 @@ # Announcing one block on fork should not trigger direct fetch # (less work than tip) - test_node.last_getdata = None + test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[0:1]) test_node.sync_with_ping() with mininode_lock: - assert_equal(test_node.last_getdata, None) + assert "getdata" not in test_node.last_message # Announcing one more block on fork should trigger direct fetch for # both blocks (same work as tip) @@ -557,11 +507,11 @@ [x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time) # Announcing 1 more header should not trigger any response - test_node.last_getdata = None + test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[18:19]) test_node.sync_with_ping() with mininode_lock: - assert_equal(test_node.last_getdata, None) + assert "getdata" not in test_node.last_message self.log.info("Part 4: success!") @@ -572,7 +522,7 @@ # First we test that receipt of an unconnecting header doesn't prevent # chain sync. for i in range(10): - test_node.last_getdata = None + test_node.last_message.pop("getdata", None) blocks = [] # Create two more blocks. for j in range(2): @@ -584,7 +534,7 @@ height += 1 # Send the header of the second block -> this won't connect. with mininode_lock: - test_node.last_getheaders = None + test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[1]]) test_node.wait_for_getheaders(timeout=1) test_node.send_header_for_blocks(blocks) @@ -610,7 +560,7 @@ # Send a header that doesn't connect, check that we get a # getheaders. 
with mininode_lock: - test_node.last_getheaders = None + test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i]]) test_node.wait_for_getheaders(timeout=1) @@ -626,25 +576,21 @@ # Send a header that doesn't connect, check that we get a # getheaders. with mininode_lock: - test_node.last_getheaders = None + test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i % len(blocks)]]) test_node.wait_for_getheaders(timeout=1) # Eventually this stops working. - with mininode_lock: - self.last_getheaders = None test_node.send_header_for_blocks([blocks[-1]]) # Should get disconnected test_node.wait_for_disconnect() - with mininode_lock: - self.last_getheaders = True self.log.info("Part 5: success!") # Finally, check that the inv node never received a getdata request, # throughout the test - assert_equal(inv_node.last_getdata, None) + assert "getdata" not in inv_node.last_message if __name__ == '__main__': diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py --- a/test/functional/test_framework/comptool.py +++ b/test/functional/test_framework/comptool.py @@ -213,9 +213,7 @@ return wait_until(disconnected, timeout=10) def wait_for_verack(self): - def veracked(): - return all(node.verack_received for node in self.test_nodes) - return wait_until(veracked, timeout=10) + return all(node.wait_for_verack() for node in self.test_nodes) def wait_for_pings(self, counter): def received_pongs(): diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -22,22 +22,23 @@ # ser_*, deser_*: functions that handle serialization/deserialization -import struct -import socket import asyncore -import time -import sys -import random -from .util import hex_str_to_bytes, bytes_to_hex_str -from io import BytesIO from codecs import encode +from collections import defaultdict +import copy import hashlib -from threading import RLock -from threading import Thread +from io import BytesIO import logging -import copy +import random +import socket +import struct +import sys +import time +from threading import RLock, Thread + from test_framework.siphash import siphash256 from test_framework.cdefs import MAX_BLOCK_SIGOPS_PER_MB +from test_framework.util import hex_str_to_bytes, bytes_to_hex_str BIP0031_VERSION = 60000 MY_VERSION = 70014 # past bip-31 for ping/pong @@ -1536,34 +1537,58 @@ r += self.block_transactions.serialize(with_witness=True) return r -# This is what a callback should look like for NodeConn -# Reimplement the on_* functions to provide handling for events - class NodeConnCB(object): + """Callback and helper functions for P2P connection to a bitcoind node. + + Individual testcases should subclass this and override the on_* methods + if they want to alter message handling behaviour. 
+ """ def __init__(self): - self.verack_received = False + # Track whether we have a P2P connection open to the node + self.connected = False + self.connection = None + + # Track number of messages of each type received and the most recent + # message of each type + self.message_count = defaultdict(int) + self.last_message = {} + + # A count of the number of ping messages we've sent to the node + self.ping_counter = 1 + # deliver_sleep_time is helpful for debugging race conditions in p2p # tests; it causes message delivery to sleep for the specified time # before acquiring the global lock and delivering the next message. self.deliver_sleep_time = None + # Remember the services our peer has advertised self.peer_services = None - self.connection = None - self.ping_counter = 1 - self.last_pong = msg_pong() + + # Message receiving methods def deliver(self, conn, message): + """Receive message and dispatch message to appropriate callback. + + We keep a count of how many of each message type has been received + and the most recent message of each type. + + Optionally waits for deliver_sleep_time before dispatching message. + """ + deliver_sleep = self.get_deliver_sleep_time() if deliver_sleep is not None: time.sleep(deliver_sleep) with mininode_lock: try: - getattr(self, 'on_' + message.command.decode('ascii'))( - conn, message) + command = message.command.decode('ascii') + self.message_count[command] += 1 + self.last_message[command] = message + getattr(self, 'on_' + command)(conn, message) except: - logger.exception("ERROR delivering %s" % repr(message)) + print("ERROR delivering %s (%s)" % (repr(message), + sys.exc_info()[0])) def set_deliver_sleep_time(self, value): with mininode_lock: @@ -1573,8 +1598,15 @@ with mininode_lock: return self.deliver_sleep_time - # Callbacks which can be overridden by subclasses - ################################################# + # Callback methods. Can be overridden by subclasses in individual test + # cases to provide custom message handling behaviour. 
+ + def on_open(self, conn): + self.connected = True + + def on_close(self, conn): + self.connected = False + self.connection = None def on_addr(self, conn, message): pass @@ -1584,8 +1616,6 @@ def on_blocktxn(self, conn, message): pass - def on_close(self, conn): pass - def on_cmpctblock(self, conn, message): pass def on_feefilter(self, conn, message): pass @@ -1604,7 +1634,7 @@ def on_mempool(self, conn): pass - def on_open(self, conn): pass + def on_pong(self, conn, message): pass def on_reject(self, conn, message): pass @@ -1626,9 +1656,6 @@ if conn.ver_send > BIP0031_VERSION: conn.send_message(msg_pong(message.nonce)) - def on_pong(self, conn, message): - self.last_pong = message - def on_verack(self, conn, message): conn.ver_recv = conn.ver_send self.verack_received = True @@ -1641,14 +1668,46 @@ conn.ver_recv = conn.ver_send conn.nServices = message.nServices - # Helper functions - ################## + # Connection helper methods + def add_connection(self, conn): self.connection = conn - # Wrapper for the NodeConn's send_message function + def wait_for_disconnect(self, timeout=60): + def test_function(): return not self.connected + assert wait_until(test_function, timeout=timeout) + + # Message receiving helper methods + + def wait_for_block(self, blockhash, timeout=60): + def test_function(): return self.last_message.get( + "block") and self.last_message["block"].block.rehash() == blockhash + assert wait_until(test_function, timeout=timeout) + + def wait_for_getdata(self, timeout=60): + def test_function(): return self.last_message.get("getdata") + assert wait_until(test_function, timeout=timeout) + + def wait_for_getheaders(self, timeout=60): + def test_function(): return self.last_message.get("getheaders") + assert wait_until(test_function, timeout=timeout) + + def wait_for_inv(self, expected_inv, timeout=60): + def test_function(): return self.last_message.get( + "inv") and self.last_message["inv"] != expected_inv + assert wait_until(test_function, timeout=timeout) + + def wait_for_verack(self, timeout=60): + def test_function(): return self.message_count["verack"] + assert wait_until(test_function, timeout=timeout) + + # Message sending helper functions + def send_message(self, message): - self.connection.send_message(message) + if self.connection: + self.connection.send_message(message) + else: + logger.error("Cannot send message. No connection to node!") def send_and_ping(self, message): self.send_message(message) @@ -1656,27 +1715,13 @@ # Sync up with the node def sync_with_ping(self, timeout=60): - def received_pong(): - return (self.last_pong.nonce == self.ping_counter) self.send_message(msg_ping(nonce=self.ping_counter)) - success = wait_until(received_pong, timeout=timeout) - if not success: - logger.error("sync_with_ping failed!") - raise AssertionError("sync_with_ping failed!") - self.ping_counter += 1 - - return success - # Spin until verack message is received from the node. - # Tests may want to use this as a signal that the test can begin. - # This can be called from the testing thread, so it needs to acquire the - # global lock. - def wait_for_verack(self): - while True: - with mininode_lock: - if self.verack_received: - return - time.sleep(0.05) + def test_function(): return self.last_message.get( + "pong") and self.last_message["pong"].nonce == self.ping_counter + assert wait_until(test_function, timeout=timeout) + self.ping_counter += 1 + return True # The actual NodeConn class # This class provides an interface for a p2p connection to a specified node
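
The core of the mininode.py change above is that NodeConnCB.deliver() now records every incoming message in two shared structures (message_count and last_message) instead of one hand-written last_* attribute per command. The standalone sketch below illustrates the bookkeeping semantics the rewritten tests rely on; it is not part of the patch, plain strings stand in for real message objects, and record() is a hypothetical stand-in for the dispatch code in deliver().

    from collections import defaultdict

    # Per-command counter and most-recent-message store, as added to NodeConnCB.
    message_count = defaultdict(int)
    last_message = {}

    def record(command, message):
        # What deliver() does for each incoming message before calling on_<command>.
        message_count[command] += 1
        last_message[command] = message

    record("ping", "msg_ping#1")
    record("ping", "msg_ping#2")
    record("verack", "msg_verack")

    assert message_count["ping"] == 2
    assert last_message["ping"] == "msg_ping#2"
    assert "headers" not in last_message      # never received -> membership test, not None check
    assert message_count["headers"] == 0      # defaultdict(int): safe to read a missing command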
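
For orientation, here is a minimal sketch of how a functional test drives a node through the consolidated NodeConnCB after this patch, using only interfaces shown in the diff (add_connection, wait_for_verack, sync_with_ping, last_message, message_count). The test class, its name, and its assertions are illustrative only, assuming the framework layout used by the tests above; they are not part of the changeset.

    from test_framework.mininode import NodeConnCB, NodeConn, NetworkThread, mininode_lock
    from test_framework.test_framework import BitcoinTestFramework
    from test_framework.util import p2p_port

    class P2PExampleTest(BitcoinTestFramework):
        def __init__(self):
            super().__init__()
            self.setup_clean_chain = True
            self.num_nodes = 1

        def run_test(self):
            # A plain NodeConnCB is enough when no handlers need overriding;
            # subclass only to change message handling behaviour.
            peer = NodeConnCB()
            conn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], peer)
            peer.add_connection(conn)
            NetworkThread().start()
            peer.wait_for_verack()      # shared helper replaces each test's spin loop

            self.nodes[0].generate(1)
            peer.sync_with_ping()       # round-trip a ping so prior messages are processed

            with mininode_lock:
                assert peer.message_count["verack"] == 1
                assert "reject" not in peer.last_message
                # Any block announcement is available as
                # peer.last_message.get("inv") / peer.last_message.get("headers").

    if __name__ == '__main__':
        P2PExampleTest().main()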