diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py
index 698c36db7..7a0efd2f9 100755
--- a/test/functional/p2p_filter.py
+++ b/test/functional/p2p_filter.py
@@ -1,120 +1,130 @@
 #!/usr/bin/env python3
 # Copyright (c) 2020 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """
 Test BIP 37
 """

 from test_framework.messages import (
     MSG_BLOCK,
     MSG_FILTERED_BLOCK,
     msg_getdata,
     msg_filterload,
     msg_filteradd,
     msg_filterclear,
 )
 from test_framework.mininode import P2PInterface
 from test_framework.test_framework import BitcoinTestFramework


 class FilterNode(P2PInterface):
     # This is a P2SH watch-only wallet
     watch_script_pubkey = 'a914ffffffffffffffffffffffffffffffffffffffff87'
     # The initial filter (n=10, fp=0.000001) with just the above scriptPubKey
     # added
     watch_filter_init = msg_filterload(
         data=b'@\x00\x08\x00\x80\x00\x00 \x00\xc0\x00 \x04\x00\x08$\x00\x04\x80\x00\x00 \x00\x00\x00\x00\x80\x00\x00@\x00\x02@ \x00',
         nHashFuncs=19,
         nTweak=0,
         nFlags=1,
     )

     def on_inv(self, message):
         want = msg_getdata()
         for i in message.inv:
             # inv messages can only contain TX or BLOCK, so translate BLOCK to
             # FILTERED_BLOCK
             if i.type == MSG_BLOCK:
                 i.type = MSG_FILTERED_BLOCK
             want.inv.append(i)
         if len(want.inv):
             self.send_message(want)

     def on_merkleblock(self, message):
         self.merkleblock_received = True

     def on_tx(self, message):
         self.tx_received = True


 class FilterTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = False
         self.num_nodes = 1
         self.extra_args = [[
             '-peerbloomfilters',
             '-whitelist=noban@127.0.0.1',  # immediate tx relay
         ]]

     def skip_test_if_missing_module(self):
         self.skip_if_no_wallet()

     def run_test(self):
-        self.log.info('Add filtered P2P connection to the node')
         filter_node = self.nodes[0].add_p2p_connection(FilterNode())
+
+        self.log.info('Check that a too-large filter is rejected')
+        with self.nodes[0].assert_debug_log(['Misbehaving']):
+            filter_node.send_and_ping(
+                msg_filterload(
+                    data=b'\xaa',
+                    nHashFuncs=51,
+                    nTweak=0,
+                    nFlags=1))
+
+        self.log.info('Add filtered P2P connection to the node')
         filter_node.send_and_ping(filter_node.watch_filter_init)
         filter_address = self.nodes[0].decodescript(
             filter_node.watch_script_pubkey)['addresses'][0]

         self.log.info(
             'Check that we receive merkleblock and tx if the filter matches a tx in a block')
         block_hash = self.nodes[0].generatetoaddress(1, filter_address)[0]
         txid = self.nodes[0].getblock(block_hash)['tx'][0]
         filter_node.wait_for_merkleblock(block_hash)
         filter_node.wait_for_tx(txid)

         self.log.info(
             'Check that we only receive a merkleblock if the filter does not match a tx in a block')
         filter_node.tx_received = False
         block_hash = self.nodes[0].generatetoaddress(
             1, self.nodes[0].getnewaddress())[0]
         filter_node.wait_for_merkleblock(block_hash)
         assert not filter_node.tx_received

         self.log.info(
             'Check that we do not receive a tx if the filter does not match a mempool tx')
         filter_node.merkleblock_received = False
         filter_node.tx_received = False
         self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 90)
         filter_node.sync_with_ping()
         filter_node.sync_with_ping()
         assert not filter_node.merkleblock_received
         assert not filter_node.tx_received

         self.log.info(
             'Check that we receive a tx in reply to a mempool msg if the filter matches a mempool tx')
         filter_node.merkleblock_received = False
         txid = self.nodes[0].sendtoaddress(filter_address, 90)
         filter_node.wait_for_tx(txid)
         assert not filter_node.merkleblock_received

         self.log.info(
             'Check that after deleting filter all txs get relayed again')
         filter_node.send_and_ping(msg_filterclear())
         for _ in range(5):
             txid = self.nodes[0].sendtoaddress(
                 self.nodes[0].getnewaddress(), 7)
             filter_node.wait_for_tx(txid)

         self.log.info(
             "Check that division-by-zero remote crash bug [CVE-2013-5700] is fixed")
         filter_node.send_and_ping(msg_filterload(data=b'', nHashFuncs=1))
         filter_node.send_and_ping(
             msg_filteradd(
                 data=b'letstrytocrashthisnode'))


 if __name__ == '__main__':
     FilterTest().main()
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 38d11813e..45ee14f7a 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -1,234 +1,252 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test node responses to invalid network messages."""
 import asyncio
 import struct

 from test_framework import messages
-from test_framework.mininode import P2PDataStore, NetworkThread
+from test_framework.mininode import (
+    NetworkThread,
+    P2PDataStore,
+    P2PInterface,
+)
 from test_framework.test_framework import BitcoinTestFramework


 class msg_unrecognized:
     """Nonsensical message. Modeled after similar types in test_framework.messages."""

     msgtype = b'badmsg'

     def __init__(self, *, str_data):
         self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data

     def serialize(self):
         return messages.ser_string(self.str_data)

     def __repr__(self):
         return "{}(data={})".format(self.msgtype, self.str_data)


 class InvalidMessagesTest(BitcoinTestFramework):
     def set_test_params(self):
         self.num_nodes = 1
         self.setup_clean_chain = True

     def run_test(self):
         """
         . Test msg header
         0. Send a bunch of large (2MB) messages of an unrecognized type. Check to see
            that it isn't an effective DoS against the node.

         1. Send an oversized (2MB+) message and check that we're disconnected.

         2. Send a few messages with an incorrect data size in the header, ensure the
            messages are ignored.
         """
         self.test_magic_bytes()
         self.test_checksum()
         self.test_size()
         self.test_msgtype()
+        self.test_large_inv()

         node = self.nodes[0]
         self.node = node
         node.add_p2p_connection(P2PDataStore())
         conn2 = node.add_p2p_connection(P2PDataStore())

         # 2MB, per MAX_PROTOCOL_MESSAGE_LENGTH
         msg_limit = 2 * 1024 * 1024
         # Account for the 4-byte length prefix
         valid_data_limit = msg_limit - 5

         #
         # 0.
         #
         # Send as large a message as is valid, ensure we aren't disconnected but
         # also can't exhaust resources.
         #
         msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
         assert len(msg_at_size.serialize()) == msg_limit

         self.log.info(
             "Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")

         # Run a bunch of times to test for memory exhaustion.
         for _ in range(80):
             node.p2p.send_message(msg_at_size)

         # Check that, even though the node is being hammered by nonsense from one
         # connection, it can still service other peers in a timely way.
         for _ in range(20):
             conn2.sync_with_ping(timeout=2)

         # Peer 1, despite serving up a bunch of nonsense, should still be
         # connected.
         self.log.info("Waiting for node to drop junk messages.")
         node.p2p.sync_with_ping(timeout=320)
         assert node.p2p.is_connected

         #
         # 1.
         #
         # Send an oversized message, ensure we're disconnected.
         #
         msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
         assert len(msg_over_size.serialize()) == (msg_limit + 1)

         with node.assert_debug_log(["Oversized header detected"]):
             # An unknown message type (or *any* message type) over
             # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
             node.p2p.send_message(msg_over_size)
             node.p2p.wait_for_disconnect(timeout=4)

         node.disconnect_p2ps()
         conn = node.add_p2p_connection(P2PDataStore())
         conn.wait_for_verack()

         #
         # 2.
         #
         # Send messages with an incorrect data size in the header.
         #
         actual_size = 100
         msg = msg_unrecognized(str_data="b" * actual_size)

         # TODO: handle larger-than cases. I haven't been able to pin down what
         # behavior to expect.
         for wrong_size in (2, 77, 78, 79):
             self.log.info(
                 "Sending a message with incorrect size of {}".format(wrong_size))

             # Unmodified message should submit okay.
             node.p2p.send_and_ping(msg)

             # A message lying about its data size results in a disconnect when the incorrect
             # data size is less than the actual size.
             #
             # TODO: why does behavior change at 78 bytes?
             #
             node.p2p.send_raw_message(
                 self._tweak_msg_data_size(
                     msg, wrong_size))

             # For some reason unknown to me, we sometimes have to push additional data to the
             # peer in order for it to realize a disconnect.
             try:
                 node.p2p.send_message(messages.msg_ping(nonce=123123))
             except IOError:
                 pass

             node.p2p.wait_for_disconnect(timeout=10)
             node.disconnect_p2ps()
             node.add_p2p_connection(P2PDataStore())

         # Node is still up.
         conn = node.add_p2p_connection(P2PDataStore())

     def test_magic_bytes(self):
         conn = self.nodes[0].add_p2p_connection(P2PDataStore())

         async def swap_magic_bytes():
             # Need to ignore all incoming messages from now, since they come
             # with "invalid" magic bytes
             conn._on_data = lambda: None
             conn.magic_bytes = b'\x00\x11\x22\x32'

         # Call .result() to block until the atomic swap is complete, otherwise
         # we might run into races later on
         asyncio.run_coroutine_threadsafe(
             swap_magic_bytes(), NetworkThread.network_event_loop).result()

         with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']):
             conn.send_message(messages.msg_ping(nonce=0xff))
             conn.wait_for_disconnect(timeout=1)
             self.nodes[0].disconnect_p2ps()

     def test_checksum(self):
         conn = self.nodes[0].add_p2p_connection(P2PDataStore())
         with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
             msg = conn.build_message(msg_unrecognized(str_data="d"))
             cut_len = (
                 # magic
                 4 +
                 # msgtype
                 12 +
                 # len
                 4
             )
             # modify checksum
             msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
             self.nodes[0].p2p.send_raw_message(msg)
             conn.wait_for_disconnect()
         self.nodes[0].disconnect_p2ps()

     def test_size(self):
         conn = self.nodes[0].add_p2p_connection(P2PDataStore())
         with self.nodes[0].assert_debug_log(['']):
             msg = conn.build_message(msg_unrecognized(str_data="d"))
             cut_len = (
                 # magic
                 4 +
                 # command
                 12
             )
             # modify len to MAX_SIZE + 1
             msg = msg[:cut_len] + \
                 struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]

+    def test_large_inv(self):
+        conn = self.nodes[0].add_p2p_connection(P2PInterface())
+        with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (0 -> 20): oversized-inv: message inv size() = 50001']):
+            msg = messages.msg_inv([messages.CInv(1, 1)] * 50001)
+            conn.send_and_ping(msg)
+        with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (20 -> 40): too-many-inv: message getdata size() = 50001']):
+            msg = messages.msg_getdata([messages.CInv(1, 1)] * 50001)
+            conn.send_and_ping(msg)
+        with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (40 -> 60): too-many-headers: headers message size = 2001']):
+            msg = messages.msg_headers([messages.CBlockHeader()] * 2001)
+            conn.send_and_ping(msg)
+        self.nodes[0].disconnect_p2ps()
+
     def _tweak_msg_data_size(self, message, wrong_size):
         """
         Return a raw message based on another message but with an incorrect data size in
         the message header.
         """
         raw_msg = self.node.p2p.build_message(message)
         bad_size_bytes = struct.pack("<I", wrong_size)

         wait_until(lambda: p.tx_getdata_count >=
                    MAX_GETDATA_IN_FLIGHT, lock=mininode_lock)
         with mininode_lock:
             assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)

         self.log.info(
             "Now check that if we send a NOTFOUND for a transaction, we'll get one more request")
         p.send_message(msg_notfound(vec=[CInv(t=1, h=txids[0])]))
         wait_until(
             lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1,
             timeout=10,
             lock=mininode_lock)
         with mininode_lock:
             assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1)

         WAIT_TIME = TX_EXPIRY_INTERVAL // 2 + TX_EXPIRY_INTERVAL
         self.log.info(
             "if we wait about {} minutes, we should eventually get more requests".format(
                 WAIT_TIME / 60))
         self.nodes[0].setmocktime(int(time.time() + WAIT_TIME))
         wait_until(lambda: p.tx_getdata_count == MAX_GETDATA_IN_FLIGHT + 2)
         self.nodes[0].setmocktime(0)

+    def test_spurious_notfound(self):
+        self.log.info('Check that spurious notfound is ignored')
+        self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(1, 1)]))
+
     def run_test(self):
         # Setup the p2p connections
         self.peers = []
         for node in self.nodes:
             for i in range(NUM_INBOUND):
                 self.peers.append(node.add_p2p_connection(TestP2PConn()))

         self.log.info(
             "Nodes are set up with {} incoming connections each".format(NUM_INBOUND))

+        self.test_spurious_notfound()
+
         # Test the in-flight max first, because we want no transactions in
         # flight ahead of this test.
         self.test_in_flight_max()
         self.test_inv_block()
         self.test_tx_requests()


 if __name__ == '__main__':
     TxDownloadTest().main()