diff --git a/.arclint b/.arclint index 3d9389d77..3226d2151 100644 --- a/.arclint +++ b/.arclint @@ -1,341 +1,341 @@ { "linters": { "generated": { "type": "generated" }, "clang-format": { "type": "clang-format", "version": ">=12.0", "bin": [ "clang-format-12", "clang-format" ], "include": "(^(src|chronik)/.*\\.(h|c|cpp|mm)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "autopep8": { "type": "autopep8", "version": ">=1.3.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", - "(^test/functional/[a-mt].*\\.py$)" + "(^test/functional/[a-qt].*\\.py$)" ], "flags": [ "--aggressive", "--ignore=W503,W504", "--max-line-length=88" ] }, "black": { "type": "black", "version": ">=23.0.0", "include": [ - "(^test/functional/[a-mt].*\\.py$)" + "(^test/functional/[a-qt].*\\.py$)" ], "flags": [ "--preview" ] }, "flake8": { "type": "flake8", "version": ">=5.0", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ "--ignore=A003,E203,E303,E305,E501,E704,W503,W504", "--require-plugins=flake8-comprehensions,flake8-builtins" ] }, "lint-format-strings": { "type": "lint-format-strings", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/test/fuzz/strprintf.cpp$)" ] }, "check-doc": { "type": "check-doc", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)" }, "lint-tests": { "type": "lint-tests", "include": "(^src/(seeder/|rpc/|wallet/)?test/.*\\.(cpp)$)" }, "phpcs": { "type": "phpcs", "include": "(\\.php$)", "exclude": [ "(^arcanist/__phutil_library_.+\\.php$)" ], "phpcs.standard": "arcanist/phpcs.xml" }, "lint-locale-dependence": { "type": "lint-locale-dependence", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/))", "(^src/bench/nanobench.h$)" ] }, "lint-cheader": { "type": "lint-cheader", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "spelling": { "type": "spelling", "exclude": [ "(^build-aux/m4/)", "(^depends/)", "(^doc/release-notes/)", "(^contrib/gitian-builder/)", "(^src/(qt/locale|secp256k1|univalue|leveldb)/)", "(^test/lint/dictionary/)", "(package-lock.json)" ], "spelling.dictionaries": [ "test/lint/dictionary/english.json" ] }, "lint-assert-with-side-effects": { "type": "lint-assert-with-side-effects", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-quotes": { "type": "lint-include-quotes", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-guard": { "type": "lint-include-guard", "include": "(^(src|chronik)/.*\\.h$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/tinyformat.h$)" ] }, "lint-include-source": { "type": "lint-include-source", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-std-chrono": { "type": "lint-std-chrono", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-stdint": { "type": "lint-stdint", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/assumptions.h$)" ] }, 
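Note on the .arclint hunks: the only behavioural change is the character class in the paired autopep8 exclude / black include patterns, which widens from "[a-mt]" to "[a-qt]". Functional tests whose names start with n through q, including every p2p_*.py file touched below, therefore move from autopep8 to black. A quick sanity check of the pattern against a few example paths, treating the Arcanist regexp as an ordinary Python regex (an assumption; Arcanist uses PCRE-style patterns, but this class syntax behaves the same):

import re

OLD_CLASS = r"(^test/functional/[a-mt].*\.py$)"
NEW_CLASS = r"(^test/functional/[a-qt].*\.py$)"

for path in (
    "test/functional/mempool_packages.py",  # starts with 'm': black before and after
    "test/functional/p2p_addr_relay.py",    # starts with 'p': only the new class matches
    "test/functional/rpc_net.py",           # starts with 'r': neither matches, stays on autopep8
    "test/functional/tool_wallet.py",       # starts with 't': black before and after
):
    print(path, bool(re.search(OLD_CLASS, path)), bool(re.search(NEW_CLASS, path)))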
"lint-source-filename": { "type": "lint-source-filename", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-boost-dependencies": { "type": "lint-boost-dependencies", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-python-encoding": { "type": "lint-python-encoding", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-python-shebang": { "type": "lint-python-shebang", "include": "(\\.py$)", "exclude": [ "(__init__\\.py$)", "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-bash-shebang": { "type": "lint-bash-shebang", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)" ] }, "shellcheck": { "type": "shellcheck", "version": ">=0.7.0", "flags": [ "--external-sources", "--source-path=SCRIPTDIR" ], "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)" ] }, "lint-shell-locale": { "type": "lint-shell-locale", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)", "(^cmake/utils/log-and-print-on-failure.sh)" ] }, "lint-cpp-void-parameters": { "type": "lint-cpp-void-parameters", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/glibc_compat.cpp$)" ] }, "lint-logs": { "type": "lint-logs", "include": "(^(src|chronik)/.*\\.(h|cpp|rs)$)" }, "lint-qt": { "type": "lint-qt", "include": "(^src/qt/.*\\.(h|cpp)$)", "exclude": [ "(^src/qt/(locale|forms|res)/)" ] }, "lint-doxygen": { "type": "lint-doxygen", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-whitespace": { "type": "lint-whitespace", "include": "(\\.(ac|am|cmake|conf|in|include|json|m4|md|openrc|php|pl|rs|sh|txt|yml)$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "yamllint": { "type": "yamllint", "include": "(\\.(yml|yaml)$)", "exclude": "(^src/(secp256k1|univalue|leveldb)/)" }, "lint-check-nonfatal": { "type": "lint-check-nonfatal", "include": [ "(^src/rpc/.*\\.(h|c|cpp)$)", "(^src/wallet/rpc*.*\\.(h|c|cpp)$)" ], "exclude": "(^src/rpc/server.cpp)" }, "lint-markdown": { "type": "lint-markdown", "include": [ "(\\.md$)" ], "exclude": "(^contrib/gitian-builder/)" }, "lint-python-mypy": { "type": "lint-python-mypy", "version": ">=0.910", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", "(^contrib/macdeploy/)" ], "flags": [ "--ignore-missing-imports", "--install-types", "--non-interactive" ] }, "lint-python-mutable-default": { "type": "lint-python-mutable-default", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "prettier": { "type": "prettier", "version": ">=2.6.0", "include": [ "(^cashtab/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)", "(^web/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)" ], "exclude": "(^web/.*/translations/.*\\.json$)" }, "lint-python-isort": { "type": "lint-python-isort", "version": ">=5.6.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "rustfmt": { "type": "rustfmt", "version": ">=1.5.1", "include": "(\\.rs$)" }, "eslint": { "type": "eslint", "version": ">=8.0.0", "include": [ "(cashtab/.*\\.js$)", "(apps/alias-server/.*\\.js$)", 
"(modules/ecashaddrjs/.*\\.js$)", "(apps/ecash-herald/.*\\.js$)", "(modules/chronik-client/.*\\.(js|jsx|ts|tsx)$)" ] }, "lint-python-flynt": { "type": "lint-python-flynt", "version": ">=0.78", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] } } } diff --git a/test/functional/p2p_add_connections.py b/test/functional/p2p_add_connections.py index 3da8951bc..3b2a90b66 100755 --- a/test/functional/p2p_add_connections.py +++ b/test/functional/p2p_add_connections.py @@ -1,224 +1,225 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test add_outbound_p2p_connection test framework functionality""" import random from test_framework.messages import NODE_AVALANCHE, NODE_NETWORK from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, check_node_connections # From net.h MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 16 MAX_BLOCK_RELAY_ONLY_CONNECTIONS = 2 # Override DEFAULT_MAX_AVALANCHE_OUTBOUND_CONNECTIONS with # -maxavalancheoutbound MAX_AVALANCHE_OUTBOUND_CONNECTIONS = 12 class P2PFeelerReceiver(P2PInterface): def on_version(self, message): # The bitcoind node closes feeler connections as soon as a version # message is received from the test framework. Don't send any responses # to the node's version message since the connection will already be # closed. pass class P2PAddConnections(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [ [ "-avaproofstakeutxoconfirmations=1", - f"-maxavalancheoutbound={MAX_AVALANCHE_OUTBOUND_CONNECTIONS}" + f"-maxavalancheoutbound={MAX_AVALANCHE_OUTBOUND_CONNECTIONS}", ], - [] + [], ] def setup_network(self): self.setup_nodes() # Don't connect the nodes def add_outbounds(self, node, quantity, conn_type): services = NODE_NETWORK if conn_type == "avalanche": services |= NODE_AVALANCHE for _ in range(quantity): self.log.debug( - f"Node {node.index}, {conn_type}: {self.p2p_idx[node.index]}") + f"Node {node.index}, {conn_type}: {self.p2p_idx[node.index]}" + ) node.add_outbound_p2p_connection( P2PInterface(), p2p_idx=self.p2p_idx[node.index], connection_type=conn_type, services=services, ) self.p2p_idx[node.index] += 1 def simple_test(self): - self.log.info( - "Connect to various outbound peers in a predetermined way") + self.log.info("Connect to various outbound peers in a predetermined way") self.p2p_idx = [0] * self.num_nodes - self.log.info( - f"Add {MAX_OUTBOUND_FULL_RELAY_CONNECTIONS} outbounds to node 0") + self.log.info(f"Add {MAX_OUTBOUND_FULL_RELAY_CONNECTIONS} outbounds to node 0") self.add_outbounds( - self.nodes[0], - MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, - "outbound-full-relay") + self.nodes[0], MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, "outbound-full-relay" + ) self.log.info( - f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to node 0") + f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to" + " node 0" + ) self.add_outbounds( - self.nodes[0], - MAX_BLOCK_RELAY_ONLY_CONNECTIONS, - "block-relay-only") + self.nodes[0], MAX_BLOCK_RELAY_ONLY_CONNECTIONS, "block-relay-only" + ) self.log.info( - f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to node 1") + f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to" + " node 1" + ) self.add_outbounds( - 
self.nodes[1], - MAX_BLOCK_RELAY_ONLY_CONNECTIONS, - "block-relay-only") + self.nodes[1], MAX_BLOCK_RELAY_ONLY_CONNECTIONS, "block-relay-only" + ) self.log.info("Add 5 inbound connections to node 1") for i in range(5): self.log.info(f"inbound: {i}") self.nodes[1].add_p2p_connection(P2PInterface()) self.log.info("Add 4 outbounds to node 1") self.add_outbounds(self.nodes[1], 4, "outbound-full-relay") self.log.info("Check the connections opened as expected") check_node_connections( node=self.nodes[0], num_in=0, - num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + MAX_BLOCK_RELAY_ONLY_CONNECTIONS) + num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + + MAX_BLOCK_RELAY_ONLY_CONNECTIONS, + ) check_node_connections( - node=self.nodes[1], - num_in=5, - num_out=4 + MAX_BLOCK_RELAY_ONLY_CONNECTIONS) + node=self.nodes[1], num_in=5, num_out=4 + MAX_BLOCK_RELAY_ONLY_CONNECTIONS + ) self.log.info("Disconnect p2p connections & try to re-open") self.nodes[0].disconnect_p2ps() self.p2p_idx[0] = 0 check_node_connections(node=self.nodes[0], num_in=0, num_out=0) - self.log.info( - f"Add {MAX_OUTBOUND_FULL_RELAY_CONNECTIONS} outbounds to node 0") + self.log.info(f"Add {MAX_OUTBOUND_FULL_RELAY_CONNECTIONS} outbounds to node 0") self.add_outbounds( - self.nodes[0], - MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, - "outbound-full-relay") + self.nodes[0], MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, "outbound-full-relay" + ) check_node_connections( - node=self.nodes[0], - num_in=0, - num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS) + node=self.nodes[0], num_in=0, num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + ) self.log.info( - f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to node 0") + f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to" + " node 0" + ) self.add_outbounds( - self.nodes[0], - MAX_BLOCK_RELAY_ONLY_CONNECTIONS, - "block-relay-only") + self.nodes[0], MAX_BLOCK_RELAY_ONLY_CONNECTIONS, "block-relay-only" + ) check_node_connections( node=self.nodes[0], num_in=0, - num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + MAX_BLOCK_RELAY_ONLY_CONNECTIONS) + num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + + MAX_BLOCK_RELAY_ONLY_CONNECTIONS, + ) self.log.info("Restart node 0 and try to reconnect to p2ps") self.restart_node(0) self.p2p_idx[0] = 0 - self.log.info( - f"Add {MAX_OUTBOUND_FULL_RELAY_CONNECTIONS} outbounds to node 0") + self.log.info(f"Add {MAX_OUTBOUND_FULL_RELAY_CONNECTIONS} outbounds to node 0") self.add_outbounds( - self.nodes[0], - MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, - "outbound-full-relay") + self.nodes[0], MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, "outbound-full-relay" + ) check_node_connections( - node=self.nodes[0], - num_in=0, - num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS) + node=self.nodes[0], num_in=0, num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + ) self.log.info( - f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to node 0") + f"Add {MAX_BLOCK_RELAY_ONLY_CONNECTIONS} block-relay-only connections to" + " node 0" + ) self.add_outbounds( - self.nodes[0], - MAX_BLOCK_RELAY_ONLY_CONNECTIONS, - "block-relay-only") + self.nodes[0], MAX_BLOCK_RELAY_ONLY_CONNECTIONS, "block-relay-only" + ) check_node_connections( node=self.nodes[0], num_in=0, - num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + MAX_BLOCK_RELAY_ONLY_CONNECTIONS) + num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + + MAX_BLOCK_RELAY_ONLY_CONNECTIONS, + ) check_node_connections( - node=self.nodes[1], - num_in=5, - num_out=4 + MAX_BLOCK_RELAY_ONLY_CONNECTIONS) + node=self.nodes[1], num_in=5, num_out=4 + 
MAX_BLOCK_RELAY_ONLY_CONNECTIONS + ) self.log.info("Add 1 feeler connection to node 0") feeler_conn = self.nodes[0].add_outbound_p2p_connection( - P2PFeelerReceiver(), p2p_idx=self.p2p_idx[0], connection_type="feeler") + P2PFeelerReceiver(), p2p_idx=self.p2p_idx[0], connection_type="feeler" + ) # Feeler connection is closed assert not feeler_conn.is_connected # Verify version message received assert_equal(feeler_conn.message_count["version"], 1) # Feeler connections do not request tx relay assert_equal(feeler_conn.last_message["version"].relay, 0) self.log.info("Connecting avalanche outbounds") self.add_outbounds( - self.nodes[0], - MAX_AVALANCHE_OUTBOUND_CONNECTIONS, - "avalanche") + self.nodes[0], MAX_AVALANCHE_OUTBOUND_CONNECTIONS, "avalanche" + ) check_node_connections( node=self.nodes[0], num_in=0, - num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + - MAX_BLOCK_RELAY_ONLY_CONNECTIONS + - MAX_AVALANCHE_OUTBOUND_CONNECTIONS) + num_out=MAX_OUTBOUND_FULL_RELAY_CONNECTIONS + + MAX_BLOCK_RELAY_ONLY_CONNECTIONS + + MAX_AVALANCHE_OUTBOUND_CONNECTIONS, + ) def random_test(self): for node in self.nodes: node.disconnect_p2ps() self.p2p_idx = [0] * self.num_nodes remaining_outbounds = { "outbound-full-relay": MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, "block-relay-only": MAX_BLOCK_RELAY_ONLY_CONNECTIONS, "avalanche": MAX_AVALANCHE_OUTBOUND_CONNECTIONS, } max_outbounds = sum(remaining_outbounds.values()) iterations = random.randint(1, 5 * max_outbounds) - self.log.info( - f"Randomly insert outbounds of various types {iterations} times") + self.log.info(f"Randomly insert outbounds of various types {iterations} times") for _ in range(iterations): conn_type = random.choice(list(remaining_outbounds)) if remaining_outbounds[conn_type] <= 0: continue self.add_outbounds(self.nodes[0], 1, conn_type) remaining_outbounds[conn_type] -= 1 check_node_connections( node=self.nodes[0], num_in=0, - num_out=max_outbounds - sum(remaining_outbounds.values())) + num_out=max_outbounds - sum(remaining_outbounds.values()), + ) def run_test(self): self.simple_test() self.random_test() -if __name__ == '__main__': +if __name__ == "__main__": P2PAddConnections().main() diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 1fa9549e9..66bd3b9ac 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -1,438 +1,451 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
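The Python hunks in p2p_add_connections.py above, and in the files that follow, are mechanical output of black now that these files fall under the black linter configured in .arclint (version >= 23.0.0, with the --preview flag). A rough way to reproduce one of the rewrapped calls locally, assuming black's format_str/Mode Python API, which can shift between releases; the arc lint run remains the authoritative path:

import black

src = (
    "self.add_outbounds(\n"
    "    self.nodes[0],\n"
    "    MAX_OUTBOUND_FULL_RELAY_CONNECTIONS,\n"
    '    "outbound-full-relay")\n'
)
# black re-wraps the arguments onto a single indented line with the closing
# parenthesis on its own line, matching the "+" side of the hunks above.
print(black.format_str(src, mode=black.Mode(preview=True)))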
""" Test addr relay """ import random import time from test_framework.messages import ( NODE_NETWORK, CAddress, msg_addr, msg_getaddr, msg_verack, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_greater_than class AddrReceiver(P2PInterface): num_ipv4_received = 0 test_addr_contents = False _tokens = 1 send_getaddr = True def __init__(self, test_addr_contents=False, send_getaddr=True): super().__init__() self.test_addr_contents = test_addr_contents self.send_getaddr = send_getaddr def on_addr(self, message): for addr in message.addrs: self.num_ipv4_received += 1 if self.test_addr_contents: # relay_tests checks the content of the addr messages match # expectations based on the message creation in setup_addr_msg assert_equal(addr.nServices, NODE_NETWORK) if not 8333 <= addr.port < 8343: raise AssertionError( - f"Invalid addr.port of {addr.port} (8333-8342 expected)") - assert addr.ip.startswith('123.123.123.') + f"Invalid addr.port of {addr.port} (8333-8342 expected)" + ) + assert addr.ip.startswith("123.123.123.") def on_getaddr(self, message): # When the node sends us a getaddr, it increments the addr relay tokens # for the connection by 1000 self._tokens += 1000 @property def tokens(self): with p2p_lock: return self._tokens def increment_tokens(self, n): # When we move mocktime forward, the node increments the addr relay # tokens for its peers with p2p_lock: self._tokens += n def addr_received(self): return self.num_ipv4_received != 0 def on_version(self, message): self.send_message(msg_verack()) - if (self.send_getaddr): + if self.send_getaddr: self.send_message(msg_getaddr()) def getaddr_received(self): - return self.message_count['getaddr'] > 0 + return self.message_count["getaddr"] > 0 class AddrTest(BitcoinTestFramework): counter = 0 mocktime = int(time.time()) def set_test_params(self): self.num_nodes = 1 self.extra_args = [["-whitelist=addr@127.0.0.1"]] def run_test(self): self.oversized_addr_test() self.relay_tests() self.inbound_blackhole_tests() # This test populates the addrman, which can impact the node's behavior # in subsequent tests self.getaddr_tests() self.blocksonly_mode_tests() self.rate_limit_tests() def setup_addr_msg(self, num): addrs = [] for i in range(num): addr = CAddress() addr.time = self.mocktime + i addr.nServices = NODE_NETWORK addr.ip = f"123.123.123.{self.counter % 256}" addr.port = 8333 + i addrs.append(addr) self.counter += 1 msg = msg_addr() msg.addrs = addrs return msg def setup_rand_addr_msg(self, num): addrs = [] for i in range(num): addr = CAddress() addr.time = self.mocktime + i addr.nServices = NODE_NETWORK addr.ip = ( f"{random.randrange(128,169)}.{random.randrange(1,255)}" f".{random.randrange(1,255)}.{random.randrange(1,255)}" ) addr.port = 8333 addrs.append(addr) msg = msg_addr() msg.addrs = addrs return msg def send_addr_msg(self, source, msg, receivers): source.send_and_ping(msg) # pop m_next_addr_send timer self.mocktime += 10 * 60 self.nodes[0].setmocktime(self.mocktime) for peer in receivers: peer.sync_send_with_ping() def oversized_addr_test(self): - self.log.info('Send an addr message that is too large') + self.log.info("Send an addr message that is too large") addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) msg = self.setup_addr_msg(1010) - with self.nodes[0].assert_debug_log(['addr message size = 1010']): + with self.nodes[0].assert_debug_log(["addr message size = 1010"]): 
addr_source.send_and_ping(msg) self.nodes[0].disconnect_p2ps() def relay_tests(self): - self.log.info('Test address relay') - self.log.info( - 'Check that addr message content is relayed and added to addrman') + self.log.info("Test address relay") + self.log.info("Check that addr message content is relayed and added to addrman") addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) num_receivers = 7 receivers = [] for _ in range(num_receivers): receivers.append( - self.nodes[0].add_p2p_connection( - AddrReceiver( - test_addr_contents=True))) + self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)) + ) # Keep this with length <= 10. Addresses from larger messages are not # relayed. num_ipv4_addrs = 10 msg = self.setup_addr_msg(num_ipv4_addrs) with self.nodes[0].assert_debug_log( [ - f'Added {num_ipv4_addrs} addresses from 127.0.0.1: 0 tried', - 'received: addr (301 bytes) peer=1', + f"Added {num_ipv4_addrs} addresses from 127.0.0.1: 0 tried", + "received: addr (301 bytes) peer=1", ] ): self.send_addr_msg(addr_source, msg, receivers) total_ipv4_received = sum(r.num_ipv4_received for r in receivers) # Every IPv4 address must be relayed to two peers, other than the # originating node (addr_source). ipv4_branching_factor = 2 - assert_equal( - total_ipv4_received, - num_ipv4_addrs * - ipv4_branching_factor) + assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor) self.nodes[0].disconnect_p2ps() - self.log.info('Check relay of addresses received from outbound peers') + self.log.info("Check relay of addresses received from outbound peers") inbound_peer = self.nodes[0].add_p2p_connection( - AddrReceiver(test_addr_contents=True, send_getaddr=False)) + AddrReceiver(test_addr_contents=True, send_getaddr=False) + ) full_outbound_peer = self.nodes[0].add_outbound_p2p_connection( - AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") + AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay" + ) msg = self.setup_addr_msg(2) self.send_addr_msg(full_outbound_peer, msg, [inbound_peer]) self.log.info( - 'Check that the first addr message received from an outbound peer is not relayed') + "Check that the first addr message received from an outbound peer is not" + " relayed" + ) # Currently, there is a flag that prevents the first addr message # received from a new outbound peer to be relayed to others. Originally # meant to prevent large GETADDR responses from being relayed, it now # typically affects the self-announcement of the outbound peer which is # often sent before the GETADDR response. assert_equal(inbound_peer.num_ipv4_received, 0) # Send an empty ADDR message to initialize address relay on this # connection. 
inbound_peer.send_and_ping(msg_addr()) self.log.info( - 'Check that subsequent addr messages sent from an outbound peer are relayed') + "Check that subsequent addr messages sent from an outbound peer are relayed" + ) msg2 = self.setup_addr_msg(2) self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer]) assert_equal(inbound_peer.num_ipv4_received, 2) - self.log.info('Check address relay to outbound peers') + self.log.info("Check address relay to outbound peers") block_relay_peer = self.nodes[0].add_outbound_p2p_connection( - AddrReceiver(), p2p_idx=1, connection_type="block-relay-only") + AddrReceiver(), p2p_idx=1, connection_type="block-relay-only" + ) msg3 = self.setup_addr_msg(2) - self.send_addr_msg( - inbound_peer, msg3, - [full_outbound_peer, block_relay_peer]) + self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer]) - self.log.info( - 'Check that addresses are relayed to full outbound peers') + self.log.info("Check that addresses are relayed to full outbound peers") assert_equal(full_outbound_peer.num_ipv4_received, 2) self.log.info( - 'Check that addresses are not relayed to block-relay-only outbound peers') + "Check that addresses are not relayed to block-relay-only outbound peers" + ) assert_equal(block_relay_peer.num_ipv4_received, 0) self.nodes[0].disconnect_p2ps() def sum_addr_messages(self, msgs_dict): - return sum(bytes_received for (msg, bytes_received) - in msgs_dict.items() if msg in ['addr', 'addrv2', 'getaddr']) + return sum( + bytes_received + for (msg, bytes_received) in msgs_dict.items() + if msg in ["addr", "addrv2", "getaddr"] + ) def inbound_blackhole_tests(self): self.log.info( - 'Check that we only relay addresses to inbound peers who have previously sent us addr related messages') + "Check that we only relay addresses to inbound peers who have previously" + " sent us addr related messages" + ) addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) receiver_peer = self.nodes[0].add_p2p_connection(AddrReceiver()) blackhole_peer = self.nodes[0].add_p2p_connection( - AddrReceiver(send_getaddr=False)) + AddrReceiver(send_getaddr=False) + ) initial_addrs_received = receiver_peer.num_ipv4_received peerinfo = self.nodes[0].getpeerinfo() - assert_equal(peerinfo[0]['addr_relay_enabled'], True) # addr_source - assert_equal(peerinfo[1]['addr_relay_enabled'], True) # receiver_peer - assert_equal( - peerinfo[2]['addr_relay_enabled'], - False) # blackhole_peer + assert_equal(peerinfo[0]["addr_relay_enabled"], True) # addr_source + assert_equal(peerinfo[1]["addr_relay_enabled"], True) # receiver_peer + assert_equal(peerinfo[2]["addr_relay_enabled"], False) # blackhole_peer # addr_source sends 2 addresses to node0 msg = self.setup_addr_msg(2) addr_source.send_and_ping(msg) self.mocktime += 30 * 60 self.nodes[0].setmocktime(self.mocktime) receiver_peer.sync_with_ping() blackhole_peer.sync_with_ping() peerinfo = self.nodes[0].getpeerinfo() # Confirm node received addr-related messages from receiver peer - assert_greater_than( - self.sum_addr_messages( - peerinfo[1]['bytesrecv_per_msg']), 0) + assert_greater_than(self.sum_addr_messages(peerinfo[1]["bytesrecv_per_msg"]), 0) # And that peer received addresses - assert_equal( - receiver_peer.num_ipv4_received - - initial_addrs_received, - 2) + assert_equal(receiver_peer.num_ipv4_received - initial_addrs_received, 2) # Confirm node has not received addr-related messages from blackhole # peer - assert_equal( - self.sum_addr_messages( - peerinfo[2]['bytesrecv_per_msg']), 0) + 
assert_equal(self.sum_addr_messages(peerinfo[2]["bytesrecv_per_msg"]), 0) # And that peer did not receive addresses assert_equal(blackhole_peer.num_ipv4_received, 0) self.log.info( - "After blackhole peer sends addr message, it becomes eligible for addr gossip") + "After blackhole peer sends addr message, it becomes eligible for addr" + " gossip" + ) blackhole_peer.send_and_ping(msg_addr()) # Confirm node has now received addr-related messages from blackhole # peer - assert_greater_than( - self.sum_addr_messages( - peerinfo[1]['bytesrecv_per_msg']), 0) - assert_equal(self.nodes[0].getpeerinfo()[2] - ['addr_relay_enabled'], True) + assert_greater_than(self.sum_addr_messages(peerinfo[1]["bytesrecv_per_msg"]), 0) + assert_equal(self.nodes[0].getpeerinfo()[2]["addr_relay_enabled"], True) msg = self.setup_addr_msg(2) self.send_addr_msg(addr_source, msg, [receiver_peer, blackhole_peer]) # And that peer received addresses assert_equal(blackhole_peer.num_ipv4_received, 2) self.nodes[0].disconnect_p2ps() def getaddr_tests(self): # In the previous tests, the node answered GETADDR requests with an # empty addrman. Due to GETADDR response caching (see # CConnman::GetAddresses), the node would continue to provide 0 addrs # in response until enough time has passed or the node is restarted. self.restart_node(0) - self.log.info('Test getaddr behavior') + self.log.info("Test getaddr behavior") self.log.info( - 'Check that we send a getaddr message upon connecting to an outbound-full-relay peer') + "Check that we send a getaddr message upon connecting to an" + " outbound-full-relay peer" + ) full_outbound_peer = self.nodes[0].add_outbound_p2p_connection( - AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") + AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay" + ) full_outbound_peer.sync_with_ping() assert full_outbound_peer.getaddr_received() self.log.info( - 'Check that we do not send a getaddr message upon connecting to a block-relay-only peer') + "Check that we do not send a getaddr message upon connecting to a" + " block-relay-only peer" + ) block_relay_peer = self.nodes[0].add_outbound_p2p_connection( - AddrReceiver(), p2p_idx=1, connection_type="block-relay-only") + AddrReceiver(), p2p_idx=1, connection_type="block-relay-only" + ) block_relay_peer.sync_with_ping() assert_equal(block_relay_peer.getaddr_received(), False) - self.log.info( - 'Check that we answer getaddr messages only from inbound peers') + self.log.info("Check that we answer getaddr messages only from inbound peers") inbound_peer = self.nodes[0].add_p2p_connection( - AddrReceiver(send_getaddr=False)) + AddrReceiver(send_getaddr=False) + ) inbound_peer.sync_with_ping() # Add some addresses to addrman for i in range(1000): first_octet = i >> 8 second_octet = i % 256 a = f"{first_octet}.{second_octet}.1.1" self.nodes[0].addpeeraddress(a, 8333) full_outbound_peer.send_and_ping(msg_getaddr()) block_relay_peer.send_and_ping(msg_getaddr()) inbound_peer.send_and_ping(msg_getaddr()) self.mocktime += 5 * 60 self.nodes[0].setmocktime(self.mocktime) inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True) assert_equal(full_outbound_peer.num_ipv4_received, 0) assert_equal(block_relay_peer.num_ipv4_received, 0) assert inbound_peer.num_ipv4_received > 100 self.nodes[0].disconnect_p2ps() def blocksonly_mode_tests(self): - self.log.info('Test addr relay in -blocksonly mode') + self.log.info("Test addr relay in -blocksonly mode") self.restart_node(0, ["-blocksonly", "-whitelist=addr@127.0.0.1"]) self.mocktime = 
int(time.time()) - self.log.info('Check that we send getaddr messages') + self.log.info("Check that we send getaddr messages") full_outbound_peer = self.nodes[0].add_outbound_p2p_connection( - AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") + AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay" + ) full_outbound_peer.sync_with_ping() assert full_outbound_peer.getaddr_received() - self.log.info('Check that we relay address messages') + self.log.info("Check that we relay address messages") addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) msg = self.setup_addr_msg(2) self.send_addr_msg(addr_source, msg, [full_outbound_peer]) assert_equal(full_outbound_peer.num_ipv4_received, 2) self.nodes[0].disconnect_p2ps() - def send_addrs_and_test_rate_limiting(self, peer, no_relay, *, new_addrs, - total_addrs): + def send_addrs_and_test_rate_limiting( + self, peer, no_relay, *, new_addrs, total_addrs + ): """Send an addr message and check that the number of addresses processed - and rate-limited is as expected + and rate-limited is as expected """ peer.send_and_ping(self.setup_rand_addr_msg(new_addrs)) peerinfo = self.nodes[0].getpeerinfo()[0] - addrs_processed = peerinfo['addr_processed'] - addrs_rate_limited = peerinfo['addr_rate_limited'] - self.log.debug(f"addrs_processed = {addrs_processed}, " - f"addrs_rate_limited = {addrs_rate_limited}") + addrs_processed = peerinfo["addr_processed"] + addrs_rate_limited = peerinfo["addr_rate_limited"] + self.log.debug( + f"addrs_processed = {addrs_processed}, " + f"addrs_rate_limited = {addrs_rate_limited}" + ) if no_relay: assert_equal(addrs_processed, 0) assert_equal(addrs_rate_limited, 0) else: assert_equal(addrs_processed, min(total_addrs, peer.tokens)) assert_equal(addrs_rate_limited, max(0, total_addrs - peer.tokens)) def rate_limit_tests(self): self.mocktime = int(time.time()) self.restart_node(0, []) self.nodes[0].setmocktime(self.mocktime) for conn_type, no_relay in [ ("outbound-full-relay", False), ("block-relay-only", True), - ("inbound", False) + ("inbound", False), ]: self.log.info( - f'Test rate limiting of addr processing for {conn_type} peers') + f"Test rate limiting of addr processing for {conn_type} peers" + ) if conn_type == "inbound": peer = self.nodes[0].add_p2p_connection(AddrReceiver()) else: peer = self.nodes[0].add_outbound_p2p_connection( - AddrReceiver(), p2p_idx=0, connection_type=conn_type) + AddrReceiver(), p2p_idx=0, connection_type=conn_type + ) # Send 600 addresses. For all but the block-relay-only peer this # should result in addresses being processed. self.send_addrs_and_test_rate_limiting( - peer, no_relay, new_addrs=600, total_addrs=600) + peer, no_relay, new_addrs=600, total_addrs=600 + ) # Send 600 more addresses. For the outbound-full-relay peer (which # we send a GETADDR, and thus will process up to 1001 incoming # addresses), this means more addresses will be processed. self.send_addrs_and_test_rate_limiting( - peer, no_relay, new_addrs=600, total_addrs=1200) + peer, no_relay, new_addrs=600, total_addrs=1200 + ) # Send 10 more. As we reached the processing limit for all nodes, # no more addresses should be procesesd. self.send_addrs_and_test_rate_limiting( - peer, no_relay, new_addrs=10, total_addrs=1210) + peer, no_relay, new_addrs=10, total_addrs=1210 + ) # Advance the time by 100 seconds, permitting the processing of 10 # more addresses. # Send 200 and verify that 10 are processed. 
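For reference, the bookkeeping that send_addrs_and_test_rate_limiting() asserts reduces to two expressions over the peer's token count: addresses are processed while tokens last, and everything beyond that is rate limited. With the outbound-full-relay peer holding 1 initial token plus the 1,000 granted when the node sends it GETADDR, the expected numbers for the three batches above fall out directly. A sketch of the same min/max arithmetic, not test framework code:

def expected_counts(total_addrs_sent, tokens):
    processed = min(total_addrs_sent, tokens)
    rate_limited = max(0, total_addrs_sent - tokens)
    return processed, rate_limited

TOKENS = 1 + 1000  # outbound-full-relay peer after the node's GETADDR

print(expected_counts(600, TOKENS))   # (600, 0)     first batch of 600
print(expected_counts(1200, TOKENS))  # (1001, 199)  after 600 more
print(expected_counts(1210, TOKENS))  # (1001, 209)  the extra 10 are all rate limited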
self.mocktime += 100 self.nodes[0].setmocktime(self.mocktime) peer.increment_tokens(10) self.send_addrs_and_test_rate_limiting( - peer, no_relay, new_addrs=200, total_addrs=1410) + peer, no_relay, new_addrs=200, total_addrs=1410 + ) # Advance the time by 1000 seconds, permitting the processing of 100 # more addresses. # Send 200 and verify that 100 are processed. self.mocktime += 1000 self.nodes[0].setmocktime(self.mocktime) peer.increment_tokens(100) self.send_addrs_and_test_rate_limiting( - peer, no_relay, new_addrs=200, total_addrs=1610) + peer, no_relay, new_addrs=200, total_addrs=1610 + ) self.nodes[0].disconnect_p2ps() -if __name__ == '__main__': +if __name__ == "__main__": AddrTest().main() diff --git a/test/functional/p2p_addrfetch.py b/test/functional/p2p_addrfetch.py index 0d1e55849..462e2ece0 100755 --- a/test/functional/p2p_addrfetch.py +++ b/test/functional/p2p_addrfetch.py @@ -1,85 +1,91 @@ #!/usr/bin/env python3 # Copyright (c) 2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test p2p addr-fetch connections """ import time from test_framework.messages import NODE_NETWORK, CAddress, msg_addr from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal ADDR = CAddress() ADDR.time = int(time.time()) ADDR.nServices = NODE_NETWORK ADDR.ip = "192.0.0.8" ADDR.port = 18444 class P2PAddrFetch(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def assert_getpeerinfo(self, *, peer_ids): num_peers = len(peer_ids) info = self.nodes[0].getpeerinfo() assert_equal(len(info), num_peers) for n in range(0, num_peers): - assert_equal(info[n]['id'], peer_ids[n]) - assert_equal(info[n]['connection_type'], 'addr-fetch') + assert_equal(info[n]["id"], peer_ids[n]) + assert_equal(info[n]["connection_type"], "addr-fetch") def run_test(self): node = self.nodes[0] self.log.info("Connect to an addr-fetch peer") peer_id = 0 peer = node.add_outbound_p2p_connection( - P2PInterface(), p2p_idx=peer_id, connection_type="addr-fetch") + P2PInterface(), p2p_idx=peer_id, connection_type="addr-fetch" + ) self.assert_getpeerinfo(peer_ids=[peer_id]) self.log.info( - "Check that we send getaddr but don't try to sync headers with the addr-fetch peer") + "Check that we send getaddr but don't try to sync headers with the" + " addr-fetch peer" + ) peer.sync_send_with_ping() with p2p_lock: - assert peer.message_count['getaddr'] == 1 - assert peer.message_count['getheaders'] == 0 + assert peer.message_count["getaddr"] == 1 + assert peer.message_count["getheaders"] == 0 self.log.info( - "Check that answering the getaddr with a single address does not lead to disconnect") + "Check that answering the getaddr with a single address does not lead to" + " disconnect" + ) # This prevents disconnecting on self-announcements msg = msg_addr() msg.addrs = [ADDR] peer.send_and_ping(msg) self.assert_getpeerinfo(peer_ids=[peer_id]) self.log.info( - "Check that answering with larger addr messages leads to disconnect") + "Check that answering with larger addr messages leads to disconnect" + ) msg.addrs = [ADDR] * 2 peer.send_message(msg) peer.wait_for_disconnect(timeout=5) - self.log.info( - "Check timeout for addr-fetch peer that does not send addrs") + self.log.info("Check timeout for addr-fetch peer that does not send addrs") peer_id = 1 peer = 
node.add_outbound_p2p_connection( - P2PInterface(), p2p_idx=peer_id, connection_type="addr-fetch") + P2PInterface(), p2p_idx=peer_id, connection_type="addr-fetch" + ) time_now = int(time.time()) self.assert_getpeerinfo(peer_ids=[peer_id]) # Expect addr-fetch peer connection to be maintained up to 5 minutes. node.setmocktime(time_now + 295) self.assert_getpeerinfo(peer_ids=[peer_id]) # Expect addr-fetch peer connection to be disconnected after 5 minutes. node.setmocktime(time_now + 301) peer.wait_for_disconnect(timeout=5) self.assert_getpeerinfo(peer_ids=[]) -if __name__ == '__main__': +if __name__ == "__main__": P2PAddrFetch().main() diff --git a/test/functional/p2p_addrv2_relay.py b/test/functional/p2p_addrv2_relay.py index 9ebac6dcc..d70fd4306 100755 --- a/test/functional/p2p_addrv2_relay.py +++ b/test/functional/p2p_addrv2_relay.py @@ -1,74 +1,77 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test addrv2 relay """ import time from test_framework.messages import NODE_NETWORK, CAddress, msg_addrv2 from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework ADDRS = [] for i in range(10): addr = CAddress() addr.time = int(time.time()) + i addr.nServices = NODE_NETWORK addr.ip = f"123.123.123.{i % 256}" addr.port = 8333 + i ADDRS.append(addr) class AddrReceiver(P2PInterface): addrv2_received_and_checked = False def __init__(self): super().__init__(support_addrv2=True) def on_addrv2(self, message): expected_set = {(addr.ip, addr.port) for addr in ADDRS} received_set = {(addr.ip, addr.port) for addr in message.addrs} if expected_set == received_set: self.addrv2_received_and_checked = True def wait_for_addrv2(self): self.wait_until(lambda: "addrv2" in self.last_message) class AddrTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-whitelist=addr@127.0.0.1"]] def run_test(self): - self.log.info('Create connection that sends addrv2 messages') + self.log.info("Create connection that sends addrv2 messages") addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) msg = msg_addrv2() - self.log.info('Send too-large addrv2 message') + self.log.info("Send too-large addrv2 message") msg.addrs = ADDRS * 101 - with self.nodes[0].assert_debug_log(['addrv2 message size = 1010']): + with self.nodes[0].assert_debug_log(["addrv2 message size = 1010"]): addr_source.send_and_ping(msg) self.log.info( - 'Check that addrv2 message content is relayed and added to addrman') + "Check that addrv2 message content is relayed and added to addrman" + ) addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) msg.addrs = ADDRS - with self.nodes[0].assert_debug_log([ - 'Added 10 addresses from 127.0.0.1: 0 tried', - 'received: addrv2 (131 bytes) peer=0', - 'sending addrv2 (131 bytes) peer=1', - ]): + with self.nodes[0].assert_debug_log( + [ + "Added 10 addresses from 127.0.0.1: 0 tried", + "received: addrv2 (131 bytes) peer=0", + "sending addrv2 (131 bytes) peer=1", + ] + ): addr_source.send_and_ping(msg) self.nodes[0].setmocktime(int(time.time()) + 30 * 60) addr_receiver.wait_for_addrv2() assert addr_receiver.addrv2_received_and_checked -if __name__ == '__main__': +if __name__ == "__main__": AddrTest().main() diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py index 824a6d649..286907cc1 
100755 --- a/test/functional/p2p_blockfilters.py +++ b/test/functional/p2p_blockfilters.py @@ -1,269 +1,273 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests NODE_COMPACT_FILTERS (BIP 157/158). Tests that a node configured with -blockfilterindex and -peerblockfilters signals NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts. """ from test_framework.messages import ( FILTER_TYPE_BASIC, NODE_COMPACT_FILTERS, hash256, msg_getcfcheckpt, msg_getcfheaders, msg_getcfilters, ser_uint256, uint256_from_str, ) from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class FiltersClient(P2PInterface): def __init__(self): super().__init__() # Store the cfilters received. self.cfilters = [] def pop_cfilters(self): cfilters = self.cfilters self.cfilters = [] return cfilters def on_cfilter(self, message): """Store cfilters received in a list.""" self.cfilters.append(message) class CompactFiltersTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.rpc_timeout = 480 self.num_nodes = 2 self.extra_args = [ ["-blockfilterindex", "-peerblockfilters"], ["-blockfilterindex"], ] def run_test(self): # Node 0 supports COMPACT_FILTERS, node 1 does not. peer_0 = self.nodes[0].add_p2p_connection(FiltersClient()) peer_1 = self.nodes[1].add_p2p_connection(FiltersClient()) # Nodes 0 & 1 share the same first 999 blocks in the chain. self.generate(self.nodes[0], 999, sync_fun=self.no_op) # Sync the blocks. Since they are numerous, bump the timeout so it # doesn't fail on slow machines. self.sync_blocks(timeout=360) # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting self.disconnect_nodes(0, 1) - stale_block_hash = self.generate( - self.nodes[0], 1, sync_fun=self.no_op)[0] + stale_block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0] self.nodes[0].syncwithvalidationinterfacequeue() assert_equal(self.nodes[0].getblockcount(), 1000) self.generate(self.nodes[1], 1001, sync_fun=self.no_op) assert_equal(self.nodes[1].getblockcount(), 2000) # Check that nodes have signalled NODE_COMPACT_FILTERS correctly. assert peer_0.nServices & NODE_COMPACT_FILTERS != 0 assert peer_1.nServices & NODE_COMPACT_FILTERS == 0 # Check that the localservices is as expected. 
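The reformatted assertions that follow check the same service bit in two places: peer_0.nServices comes from the version handshake, while getnetworkinfo()["localservices"] is a hex string, so it has to be parsed with int(..., 16) before masking. NODE_COMPACT_FILTERS is service bit 6 from BIP 157, imported from test_framework.messages above. A minimal standalone restatement of the check, with made-up example flag values:

NODE_COMPACT_FILTERS = 1 << 6  # BIP 157 service bit, value 64

def signals_compact_filters(localservices_hex):
    # "localservices" from getnetworkinfo() is a hex-encoded bitfield of service flags
    return int(localservices_hex, 16) & NODE_COMPACT_FILTERS != 0

print(signals_compact_filters("0000000000000449"))  # True  (example value, bit 6 set)
print(signals_compact_filters("0000000000000409"))  # False (example value, bit 6 clear)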
- assert int( - self.nodes[0].getnetworkinfo()['localservices'], - 16) & NODE_COMPACT_FILTERS != 0 - assert int( - self.nodes[1].getnetworkinfo()['localservices'], - 16) & NODE_COMPACT_FILTERS == 0 + assert ( + int(self.nodes[0].getnetworkinfo()["localservices"], 16) + & NODE_COMPACT_FILTERS + != 0 + ) + assert ( + int(self.nodes[1].getnetworkinfo()["localservices"], 16) + & NODE_COMPACT_FILTERS + == 0 + ) self.log.info("get cfcheckpt on chain to be re-orged out.") request = msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, stop_hash=int(stale_block_hash, 16), ) peer_0.send_and_ping(message=request) - response = peer_0.last_message['cfcheckpt'] + response = peer_0.last_message["cfcheckpt"] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) assert_equal(len(response.headers), 1) self.log.info("Reorg node 0 to a new chain.") self.connect_nodes(0, 1) self.sync_blocks(timeout=600) self.nodes[0].syncwithvalidationinterfacequeue() main_block_hash = self.nodes[0].getblockhash(1000) assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize" self.log.info("Check that peers can fetch cfcheckpt on active chain.") tip_hash = self.nodes[0].getbestblockhash() request = msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, stop_hash=int(tip_hash, 16), ) peer_0.send_and_ping(request) - response = peer_0.last_message['cfcheckpt'] + response = peer_0.last_message["cfcheckpt"] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) - main_cfcheckpt = self.nodes[0].getblockfilter( - main_block_hash, 'basic')['header'] - tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')[ - 'header'] + main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, "basic")[ + "header" + ] + tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, "basic")["header"] assert_equal( response.headers, [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)], ) self.log.info("Check that peers can fetch cfcheckpt on stale chain.") request = msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, stop_hash=int(stale_block_hash, 16), ) peer_0.send_and_ping(request) - response = peer_0.last_message['cfcheckpt'] + response = peer_0.last_message["cfcheckpt"] - stale_cfcheckpt = self.nodes[0].getblockfilter( - stale_block_hash, 'basic')['header'] + stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, "basic")[ + "header" + ] assert_equal( response.headers, - [int(header, 16) for header in (stale_cfcheckpt, )], + [int(header, 16) for header in (stale_cfcheckpt,)], ) self.log.info("Check that peers can fetch cfheaders on active chain.") request = msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(main_block_hash, 16), ) peer_0.send_and_ping(request) - response = peer_0.last_message['cfheaders'] + response = peer_0.last_message["cfheaders"] main_cfhashes = response.hashes assert_equal(len(main_cfhashes), 1000) assert_equal( compute_last_header(response.prev_header, response.hashes), int(main_cfcheckpt, 16), ) self.log.info("Check that peers can fetch cfheaders on stale chain.") request = msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(stale_block_hash, 16), ) peer_0.send_and_ping(request) - response = peer_0.last_message['cfheaders'] + response = peer_0.last_message["cfheaders"] stale_cfhashes = response.hashes assert_equal(len(stale_cfhashes), 1000) assert_equal( compute_last_header(response.prev_header, response.hashes), 
int(stale_cfcheckpt, 16), ) self.log.info("Check that peers can fetch cfilters.") stop_hash = self.nodes[0].getblockhash(10) request = msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(stop_hash, 16), ) peer_0.send_and_ping(request) response = peer_0.pop_cfilters() assert_equal(len(response), 10) self.log.info("Check that cfilter responses are correct.") - for cfilter, cfhash, height in zip( - response, main_cfhashes, range(1, 11)): + for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)): block_hash = self.nodes[0].getblockhash(height) assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) assert_equal(cfilter.block_hash, int(block_hash, 16)) computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) assert_equal(computed_cfhash, cfhash) self.log.info("Check that peers can fetch cfilters for stale blocks.") request = msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(stale_block_hash, 16), ) peer_0.send_and_ping(request) response = peer_0.pop_cfilters() assert_equal(len(response), 1) cfilter = response[0] assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) assert_equal(cfilter.block_hash, int(stale_block_hash, 16)) computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) assert_equal(computed_cfhash, stale_cfhashes[999]) self.log.info( - "Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.") + "Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection." + ) requests = [ msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, stop_hash=int(main_block_hash, 16), ), msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(main_block_hash, 16), ), msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(main_block_hash, 16), ), ] for request in requests: peer_1 = self.nodes[1].add_p2p_connection(P2PInterface()) peer_1.send_message(request) peer_1.wait_for_disconnect() self.log.info("Check that invalid requests result in disconnection.") requests = [ # Requesting too many filters results in disconnection. msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=0, stop_hash=int(main_block_hash, 16), ), # Requesting too many filter headers results in disconnection. msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=0, stop_hash=int(tip_hash, 16), ), # Requesting unknown filter type results in disconnection. msg_getcfcheckpt( filter_type=255, stop_hash=int(main_block_hash, 16), ), # Requesting unknown hash results in disconnection. 
msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, stop_hash=123456789, ), ] for request in requests: peer_0 = self.nodes[0].add_p2p_connection(P2PInterface()) peer_0.send_message(request) peer_0.wait_for_disconnect() def compute_last_header(prev_header, hashes): """Compute the last filter header from a starting header and a sequence of filter hashes.""" header = ser_uint256(prev_header) for filter_hash in hashes: header = hash256(ser_uint256(filter_hash) + header) return uint256_from_str(header) -if __name__ == '__main__': +if __name__ == "__main__": CompactFiltersTest().main() diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py index 85273ae2a..11c6b5d7d 100755 --- a/test/functional/p2p_blocksonly.py +++ b/test/functional/p2p_blocksonly.py @@ -1,144 +1,154 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test p2p blocksonly mode & block-relay-only connections.""" import time from test_framework.messages import MSG_TX, CInv, msg_inv, msg_tx from test_framework.p2p import P2PInterface, P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal from test_framework.wallet import MiniWallet class P2PBlocksOnly(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [["-blocksonly"]] def run_test(self): self.miniwallet = MiniWallet(self.nodes[0]) # Add enough mature utxos to the wallet, so that all txs spend # confirmed coins self.miniwallet.rescan_utxos() self.blocksonly_mode_tests() self.blocks_relay_conn_tests() def blocksonly_mode_tests(self): self.log.info("Tests with node running in -blocksonly mode") - assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False) + assert_equal(self.nodes[0].getnetworkinfo()["localrelay"], False) self.nodes[0].add_p2p_connection(P2PInterface()) tx, txid, tx_hex = self.check_p2p_tx_violation() - self.log.info('Check that tx invs also violate the protocol') + self.log.info("Check that tx invs also violate the protocol") self.nodes[0].add_p2p_connection(P2PInterface()) - with self.nodes[0].assert_debug_log(['transaction (0000000000000000000000000000000000000000000000000000000000001234) inv sent in violation of protocol, disconnecting peer']): - self.nodes[0].p2ps[0].send_message( - msg_inv([CInv(t=MSG_TX, h=0x1234)])) + with self.nodes[0].assert_debug_log( + [ + "transaction" + " (0000000000000000000000000000000000000000000000000000000000001234)" + " inv sent in violation of protocol, disconnecting peer" + ] + ): + self.nodes[0].p2ps[0].send_message(msg_inv([CInv(t=MSG_TX, h=0x1234)])) self.nodes[0].p2ps[0].wait_for_disconnect() del self.nodes[0].p2ps[0] self.log.info( - 'Check that txs from rpc are not rejected and relayed to other peers') + "Check that txs from rpc are not rejected and relayed to other peers" + ) tx_relay_peer = self.nodes[0].add_p2p_connection(P2PInterface()) - assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True) - assert_equal( - self.nodes[0].testmempoolaccept( - [tx_hex])[0]['allowed'], True) - with self.nodes[0].assert_debug_log([f'received getdata for: tx {txid} peer']): + assert_equal(self.nodes[0].getpeerinfo()[0]["relaytxes"], True) + assert_equal(self.nodes[0].testmempoolaccept([tx_hex])[0]["allowed"], True) + with self.nodes[0].assert_debug_log([f"received getdata for: tx {txid} peer"]): self.nodes[0].sendrawtransaction(tx_hex) 
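The compute_last_header() helper at the end of p2p_blockfilters.py above encodes the BIP 157 filter-header chain: each header is the double-SHA256 of the filter hash concatenated with the previous header, both serialized as 32-byte little-endian values, which is what ser_uint256() and hash256() do in the test framework. The same computation with only the standard library, as a sketch over raw 32-byte strings:

import hashlib

def dbl_sha256(data: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def last_filter_header(prev_header: bytes, filter_hashes: list) -> bytes:
    # prev_header and every filter hash are 32-byte little-endian blobs,
    # i.e. what ser_uint256() produces in the test framework.
    header = prev_header
    for filter_hash in filter_hashes:
        header = dbl_sha256(filter_hash + header)
    return header

# The previous header for the first filter in the chain is defined as 32 zero bytes.
print(last_filter_header(b"\x00" * 32, [dbl_sha256(b"example filter")]).hex())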
tx_relay_peer.wait_for_tx(txid) - assert_equal(self.nodes[0].getmempoolinfo()['size'], 1) + assert_equal(self.nodes[0].getmempoolinfo()["size"], 1) self.log.info("Restarting node 0 with relay permission and blocksonly") - self.restart_node(0, - ["-persistmempool=0", - "-whitelist=relay@127.0.0.1", - "-blocksonly", - '-deprecatedrpc=whitelisted']) + self.restart_node( + 0, + [ + "-persistmempool=0", + "-whitelist=relay@127.0.0.1", + "-blocksonly", + "-deprecatedrpc=whitelisted", + ], + ) assert_equal(self.nodes[0].getrawmempool(), []) first_peer = self.nodes[0].add_p2p_connection(P2PInterface()) second_peer = self.nodes[0].add_p2p_connection(P2PInterface()) peer_1_info = self.nodes[0].getpeerinfo()[0] - assert_equal(peer_1_info['permissions'], ['relay']) + assert_equal(peer_1_info["permissions"], ["relay"]) peer_2_info = self.nodes[0].getpeerinfo()[1] - assert_equal(peer_2_info['permissions'], ['relay']) - assert_equal( - self.nodes[0].testmempoolaccept( - [tx_hex])[0]['allowed'], True) + assert_equal(peer_2_info["permissions"], ["relay"]) + assert_equal(self.nodes[0].testmempoolaccept([tx_hex])[0]["allowed"], True) self.log.info( - 'Check that the tx from first_peer with relay-permission is ' - 'relayed to others (ie.second_peer)') + "Check that the tx from first_peer with relay-permission is " + "relayed to others (ie.second_peer)" + ) with self.nodes[0].assert_debug_log(["received getdata"]): # Note that normally, first_peer would never send us transactions # since we're a blocksonly node. By activating blocksonly, we # explicitly tell our peers that they should not send us # transactions, and Bitcoin ABC respects that choice and will not # send transactions. # But if, for some reason, first_peer decides to relay transactions # to us anyway, we should relay them to second_peer since we gave # relay permission to first_peer. # See https://github.com/bitcoin/bitcoin/issues/19943 for details. 
first_peer.send_message(msg_tx(tx)) self.log.info( - 'Check that the peer with relay-permission is still connected' - ' after sending the transaction') + "Check that the peer with relay-permission is still connected" + " after sending the transaction" + ) assert_equal(first_peer.is_connected, True) second_peer.wait_for_tx(txid) - assert_equal(self.nodes[0].getmempoolinfo()['size'], 1) - self.log.info( - "Relay-permission peer's transaction is accepted and relayed") + assert_equal(self.nodes[0].getmempoolinfo()["size"], 1) + self.log.info("Relay-permission peer's transaction is accepted and relayed") self.nodes[0].disconnect_p2ps() self.generate(self.nodes[0], 1) def blocks_relay_conn_tests(self): self.log.info( - 'Tests with node in normal mode with block-relay-only connections') + "Tests with node in normal mode with block-relay-only connections" + ) # disables blocks only mode self.restart_node(0, ["-noblocksonly"]) - assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], True) + assert_equal(self.nodes[0].getnetworkinfo()["localrelay"], True) # Ensure we disconnect if a block-relay-only connection sends us a # transaction self.nodes[0].add_outbound_p2p_connection( - P2PInterface(), p2p_idx=0, connection_type="block-relay-only") - assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False) + P2PInterface(), p2p_idx=0, connection_type="block-relay-only" + ) + assert_equal(self.nodes[0].getpeerinfo()[0]["relaytxes"], False) _, txid, tx_hex = self.check_p2p_tx_violation() - self.log.info( - "Check that txs from RPC are not sent to blockrelay connection") + self.log.info("Check that txs from RPC are not sent to blockrelay connection") conn = self.nodes[0].add_outbound_p2p_connection( - P2PTxInvStore(), p2p_idx=1, connection_type="block-relay-only") + P2PTxInvStore(), p2p_idx=1, connection_type="block-relay-only" + ) self.nodes[0].sendrawtransaction(tx_hex) # Bump time forward to ensure nNextInvSend timer pops self.nodes[0].setmocktime(int(time.time()) + 60) conn.sync_send_with_ping() assert int(txid, 16) not in conn.get_invs() def check_p2p_tx_violation(self): - self.log.info( - 'Check that txs from P2P are rejected and result in disconnect') + self.log.info("Check that txs from P2P are rejected and result in disconnect") spendtx = self.miniwallet.create_self_transfer(from_node=self.nodes[0]) - with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol peer=0']): - self.nodes[0].p2ps[0].send_message(msg_tx(spendtx['tx'])) + with self.nodes[0].assert_debug_log( + ["transaction sent in violation of protocol peer=0"] + ): + self.nodes[0].p2ps[0].send_message(msg_tx(spendtx["tx"])) self.nodes[0].p2ps[0].wait_for_disconnect() - assert_equal(self.nodes[0].getmempoolinfo()['size'], 0) + assert_equal(self.nodes[0].getmempoolinfo()["size"], 0) # Remove the disconnected peer del self.nodes[0].p2ps[0] - return spendtx['tx'], spendtx['txid'], spendtx['hex'] + return spendtx["tx"], spendtx["txid"], spendtx["hex"] -if __name__ == '__main__': +if __name__ == "__main__": P2PBlocksOnly().main() diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index ea83cfcc6..63165f574 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -1,941 +1,966 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test compact blocks (BIP 152). Only testing Version 1 compact blocks (txids) """ import random from test_framework.blocktools import create_block from test_framework.messages import ( MSG_BLOCK, MSG_CMPCT_BLOCK, NODE_NETWORK, BlockTransactions, BlockTransactionsRequest, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, HeaderAndShortIDs, P2PHeaderAndShortIDs, PrefilledTransaction, ToHex, calculate_shortid, msg_block, msg_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.script import OP_TRUE, CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal, uint256_hex # TestP2PConn: A peer we use to send messages to bitcoind, and store responses. class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.last_sendcmpct = [] self.block_announced = False # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. self.announced_blockhashes = set() def on_sendcmpct(self, message): self.last_sendcmpct.append(message) def on_cmpctblock(self, message): self.block_announced = True self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() self.announced_blockhashes.add( - self.last_message["cmpctblock"].header_and_shortids.header.sha256) + self.last_message["cmpctblock"].header_and_shortids.header.sha256 + ) def on_headers(self, message): self.block_announced = True for x in self.last_message["headers"].headers: x.calc_sha256() self.announced_blockhashes.add(x.sha256) def on_inv(self, message): for x in self.last_message["inv"].inv: if x.type == MSG_BLOCK: self.block_announced = True self.announced_blockhashes.add(x.hash) # Requires caller to hold p2p_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with p2p_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) self.wait_until(self.received_block_announcement, timeout=30) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): - return (block_hash in self.announced_blockhashes) + return block_hash in self.announced_blockhashes + self.wait_until(received_hash, timeout=timeout) def send_await_disconnect(self, message, timeout=30): """Sends a message to the node and wait for disconnect. 
This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) self.wait_for_disconnect(timeout) class CompactBlocksTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 - self.extra_args = [["-acceptnonstdtxn=1"], - ["-txindex", "-acceptnonstdtxn=1"]] + self.extra_args = [["-acceptnonstdtxn=1"], ["-txindex", "-acceptnonstdtxn=1"]] self.utxos = [] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def build_block_on_tip(self, node): block = create_block(tmpl=node.getblocktemplate()) block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0. block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256 self.generate(self.nodes[0], 100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) + tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")) for _ in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. - def test_sendcmpct(self, node, test_node, - preferred_version, old_node=None): + def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): - return (len(test_node.last_sendcmpct) > 0) + return len(test_node.last_sendcmpct) > 0 + test_node.wait_until(received_sendcmpct, timeout=30) with p2p_lock: # Check that the first version received is the preferred one - assert_equal( - test_node.last_sendcmpct[0].version, preferred_version) + assert_equal(test_node.last_sendcmpct[0].version, preferred_version) # And that we receive versions down to 1. assert_equal(test_node.last_sendcmpct[-1].version, 1) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(self.generate(node, 1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert peer.block_announced with p2p_lock: assert predicate(peer), ( f"block_hash={block_hash!r}, " f"cmpctblock={peer.last_message.get('cmpctblock', None)!r}, " f"inv={peer.last_message.get('inv', None)!r}" ) # We shouldn't get any block announcements via cmpctblock yet. 
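As a reading aid for the sendcmpct cases listed in the comments above, here is a stateless summary in executable form. The helper name and its asserts are illustrative only (they restate the bullet points, not the framework's behaviour verbatim), and they deliberately ignore the stateful cases the test exercises further down.

# Illustrative, stateless restatement of the four sendcmpct cases above
# (it ignores the stateful cases exercised later, e.g. a version-1
# announce=False message does not cancel an earlier valid sendcmpct).
def expected_announcement(sendcmpct_seen, announce, version_ok):
    if sendcmpct_seen and announce and version_ok:
        return "cmpctblock"
    return "inv or headers"

assert expected_announcement(False, False, False) == "inv or headers"  # no sendcmpct yet
assert expected_announcement(True, True, False) == "inv or headers"    # version too high, ignored
assert expected_announcement(True, False, True) == "inv or headers"    # announce=False
assert expected_announcement(True, True, True) == "cmpctblock"         # announce=True, valid version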
check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" not in p.last_message) + node, test_node, lambda p: "cmpctblock" not in p.last_message + ) # Try one more time, this time after requesting headers. test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) + node, + test_node, + lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message, + ) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version test_node.send_and_ping(msg_sendcmpct(announce=True, version=999)) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" not in p.last_message) + node, test_node, lambda p: "cmpctblock" not in p.last_message + ) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False - test_node.send_and_ping(msg_sendcmpct(announce=False, - version=preferred_version)) + test_node.send_and_ping( + msg_sendcmpct(announce=False, version=preferred_version) + ) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" not in p.last_message) + node, test_node, lambda p: "cmpctblock" not in p.last_message + ) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True - test_node.send_and_ping(msg_sendcmpct(announce=True, - version=preferred_version)) + test_node.send_and_ping(msg_sendcmpct(announce=True, version=preferred_version)) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" in p.last_message) + node, test_node, lambda p: "cmpctblock" in p.last_message + ) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" in p.last_message) + node, test_node, lambda p: "cmpctblock" in p.last_message + ) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" in p.last_message) + node, test_node, lambda p: "cmpctblock" in p.last_message + ) # Try one more time, after sending a version-1, announce=false message. - test_node.send_and_ping(msg_sendcmpct(announce=False, - version=preferred_version - 1)) + test_node.send_and_ping( + msg_sendcmpct(announce=False, version=preferred_version - 1) + ) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" in p.last_message) + node, test_node, lambda p: "cmpctblock" in p.last_message + ) # Now turn off announcements - test_node.send_and_ping(msg_sendcmpct(announce=False, - version=preferred_version)) + test_node.send_and_ping( + msg_sendcmpct(announce=False, version=preferred_version) + ) check_announcement_of_new_block( - node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) + node, + test_node, + lambda p: "cmpctblock" not in p.last_message + and "headers" in p.last_message, + ) if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. 
old_node.send_and_ping(msg_sendcmpct(announce=True, version=1)) # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( - node, old_node, lambda p: "cmpctblock" in p.last_message) + node, old_node, lambda p: "cmpctblock" in p.last_message + ) # This test actually causes bitcoind to (reasonably!) disconnect us, so do # this last. def test_invalid_cmpctblock_message(self): self.generate(self.nodes[0], 101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) - assert_equal( - int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # bitcoind's choice of nonce. def test_compactblock_construction(self, node, test_node): # Generate a bunch of transactions. self.generate(node, 101) num_transactions = 25 address = node.getnewaddress() for _ in range(num_transactions): txid = node.sendtoaddress(address, 100000) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) test_node.wait_for_block_announcement(tip) # Make sure we will receive a fast-announce compact block self.request_cb_announcements(test_node, node) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(self.generate(node, 1)[0], 16) # Store the raw block in our internal format. 
- block = FromHex(CBlock(), - node.getblock(uint256_hex(block_hash), False)) + block = FromHex(CBlock(), node.getblock(uint256_hex(block_hash), False)) for tx in block.vtx: tx.calc_sha256() block.rehash() # Wait until the block was announced (via compact blocks) - test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, - timeout=30) + test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30) # Now fetch and check the compact block header_and_shortids = None with p2p_lock: # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( - test_node.last_message["cmpctblock"].header_and_shortids) + test_node.last_message["cmpctblock"].header_and_shortids + ) self.check_compactblock_construction_from_block( - header_and_shortids, block_hash, block) + header_and_shortids, block_hash, block + ) # Now fetch the compact block using a normal non-announce getdata test_node.clear_block_announcement() inv = CInv(MSG_CMPCT_BLOCK, block_hash) test_node.send_message(msg_getdata([inv])) - test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, - timeout=30) + test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30) # Now fetch and check the compact block header_and_shortids = None with p2p_lock: # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( - test_node.last_message["cmpctblock"].header_and_shortids) + test_node.last_message["cmpctblock"].header_and_shortids + ) self.check_compactblock_construction_from_block( - header_and_shortids, block_hash, block) + header_and_shortids, block_hash, block + ) def check_compactblock_construction_from_block( - self, header_and_shortids, block_hash, block): + self, header_and_shortids, block_hash, block + ): # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert len(header_and_shortids.prefilled_txn) >= 1 assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # Check that the cmpctblock message announced all the transactions. - assert_equal(len(header_and_shortids.prefilled_txn) - + len(header_and_shortids.shortids), len(block.vtx)) + assert_equal( + len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), + len(block.vtx), + ) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. [k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): - if (len(header_and_shortids.prefilled_txn) > 0 and - header_and_shortids.prefilled_txn[0].index == index): + if ( + len(header_and_shortids.prefilled_txn) > 0 + and header_and_shortids.prefilled_txn[0].index == index + ): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that bitcoind requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed. 
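The shortid verification in check_compactblock_construction_from_block() above can be condensed into a few lines using the helpers this file already imports (HeaderAndShortIDs.get_siphash_keys() and calculate_shortid()). The helper below is a hypothetical restatement, not part of the test, and it assumes calc_sha256() has already been called on every transaction, as the test does.

# Hypothetical restatement of the shortid check above: every non-prefilled
# transaction's shortid, computed with SipHash keys derived from the compact
# block's header and nonce, must match the announced shortids in block order.
def recompute_shortids(header_and_shortids, block):
    k0, k1 = header_and_shortids.get_siphash_keys()
    prefilled_indexes = {p.index for p in header_and_shortids.prefilled_txn}
    return [
        calculate_shortid(k0, k1, tx.sha256)  # assumes tx.calc_sha256() was called
        for index, tx in enumerate(block.vtx)
        if index not in prefilled_indexes
    ]

# Expected to equal header_and_shortids.shortids element for element.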
def test_compactblock_requests(self, node, test_node, version): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node) if announce == "inv": - test_node.send_message( - msg_inv([CInv(MSG_BLOCK, block.sha256)])) - self.wait_until(lambda: "getheaders" in test_node.last_message, - timeout=30) + test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)])) + self.wait_until( + lambda: "getheaders" in test_node.last_message, timeout=30 + ) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) test_node.wait_for_getdata([block.sha256], timeout=30) assert_equal(test_node.last_message["getdata"].inv[0].type, 4) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 - comp_block.shortids = [ - calculate_shortid(k0, k1, coinbase_hash)] + comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with p2p_lock: assert "getblocktxn" in test_node.last_message - absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( - ) + absolute_indexes = test_node.last_message[ + "getblocktxn" + ].block_txn_request.to_absolute() assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. # Note that num_transactions is number of transactions not including the # coinbase. def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for _ in range(num_transactions): tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) + tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b"")) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) pad_tx(tx) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) ordered_txs = block.vtx - block.vtx = [block.vtx[0]] + \ - sorted(block.vtx[1:], key=lambda tx: tx.get_id()) + block.vtx = [block.vtx[0]] + sorted(block.vtx[1:], key=lambda tx: tx.get_id()) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block, ordered_txs # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. 
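On the wire, the getblocktxn indexes requested above are not sent as absolute positions: BIP 152 encodes them differentially, which is what BlockTransactionsRequest.from_absolute()/to_absolute() convert between. A minimal standalone sketch of that encoding, with an assumed helper name:

# BIP 152 differential index encoding used by getblocktxn: each index is stored
# as the gap to the previous requested index minus one. Pure illustration of
# what BlockTransactionsRequest.from_absolute() produces.
def to_differential(absolute_indexes):
    encoded, last = [], -1
    for index in absolute_indexes:
        encoded.append(index - last - 1)
        last = index
    return encoded

assert to_differential([0]) == [0]                      # coinbase-only request, as above
assert to_differential([1, 2, 3, 4, 5]) == [1, 0, 0, 0, 0]
assert to_differential([2, 3, 4]) == [2, 0, 0]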
def test_getblocktxn_requests(self, node, test_node, version): def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with p2p_lock: assert "getblocktxn" in peer.last_message - absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute( - ) + absolute_indexes = peer.last_message[ + "getblocktxn" + ].block_txn_request.to_absolute() assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append( - [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) + self.utxos.append([ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() - msg_bt.block_transactions = BlockTransactions( - block.sha256, block.vtx[1:]) + msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append( - [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) + self.utxos.append([ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions - comp_block.initialize_from_block( - block, prefill_list=[0, 1, 5]) + comp_block.initialize_from_block(block, prefill_list=[0, 1, 5]) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) - msg_bt.block_transactions = BlockTransactions( - block.sha256, block.vtx[2:5]) + msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append( - [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) + self.utxos.append([ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(ordered_txs[1])) assert ordered_txs[1].hash in node.getrawmempool() test_node.send_and_ping(msg_tx(ordered_txs[1])) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. prefill_list = [0, 1, 2, 3, 4, 5] prefill_list.remove(block.vtx.index(ordered_txs[1])) expected_index = block.vtx.index(ordered_txs[-1]) prefill_list.remove(expected_index) comp_block.initialize_from_block(block, prefill_list=prefill_list) test_getblocktxn_response(comp_block, test_node, [expected_index]) - msg_bt.block_transactions = BlockTransactions( - block.sha256, [ordered_txs[5]]) + msg_bt.block_transactions = BlockTransactions(block.sha256, [ordered_txs[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. 
utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) - self.utxos.append( - [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) + self.utxos.append([ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) for tx in ordered_txs[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert tx.hash in mempool # Clear out last request. with p2p_lock: test_node.last_message.pop("getblocktxn", None) # Send compact block comp_block.initialize_from_block(block, prefill_list=[0]) test_tip_after_message( - node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) + node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256 + ) with p2p_lock: # Shouldn't have gotten a request for any transaction assert "getblocktxn" not in test_node.last_message # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. def test_incorrect_blocktxn_response(self, node, test_node, version): - if (len(self.utxos) == 0): + if len(self.utxos) == 0: self.make_utxos() utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) - self.utxos.append( - [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) + self.utxos.append([ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in ordered_txs[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in ordered_txs[1:6]: assert tx.hash in mempool # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0]) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indices = [] with p2p_lock: assert "getblocktxn" in test_node.last_message - absolute_indices = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( - ) + absolute_indices = test_node.last_message[ + "getblocktxn" + ].block_txn_request.to_absolute() expected_indices = [] for i in [6, 7, 8, 9, 10]: expected_indices.append(block.vtx.index(ordered_txs[i])) assert_equal(absolute_indices, sorted(expected_indices)) # Now give an incorrect response. # Note that it's possible for bitcoind to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change was made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() msg.block_transactions = BlockTransactions( - block.sha256, [ordered_txs[5]] + ordered_txs[7:]) + block.sha256, [ordered_txs[5]] + ordered_txs[7:] + ) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request test_node.wait_for_getdata([block.sha256], timeout=10) assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK # Deliver the block test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # bitcoind will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. 
MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height - while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): + while current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH: block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() - msg.block_txn_request = BlockTransactionsRequest( - int(block_hash, 16), []) + msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute( - sorted(random.sample(range(len(block.vtx)), num_to_request))) + sorted(random.sample(range(len(block.vtx)), num_to_request)) + ) test_node.send_message(msg) - test_node.wait_until(lambda: "blocktxn" in test_node.last_message, - timeout=10) + test_node.wait_until( + lambda: "blocktxn" in test_node.last_message, timeout=10 + ) [tx.calc_sha256() for tx in block.vtx] with p2p_lock: - assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int( - block_hash, 16)) + assert_equal( + test_node.last_message["blocktxn"].block_transactions.blockhash, + int(block_hash, 16), + ) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: - tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop( - 0) + tx = test_node.last_message[ + "blocktxn" + ].block_transactions.transactions.pop(0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. block_hash = node.getblockhash(current_height) - msg.block_txn_request = BlockTransactionsRequest( - int(block_hash, 16), [0]) + msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) with p2p_lock: test_node.last_message.pop("block", None) test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with p2p_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( - test_node.last_message["block"].block.sha256, int(block_hash, 16)) + test_node.last_message["block"].block.sha256, int(block_hash, 16) + ) assert "blocktxn" not in test_node.last_message def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. 
MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for _ in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(self.generate(node, 1)[0]) - test_node.wait_until(test_node.received_block_announcement, - timeout=30) + test_node.wait_until(test_node.received_block_announcement, timeout=30) test_node.clear_block_announcement() - test_node.send_message(msg_getdata( - [CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))])) - test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, - timeout=30) + test_node.send_message( + msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]) + ) + test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30) test_node.clear_block_announcement() self.generate(node, 1) - test_node.wait_until(test_node.received_block_announcement, - timeout=30) + test_node.wait_until(test_node.received_block_announcement, timeout=30) test_node.clear_block_announcement() with p2p_lock: test_node.last_message.pop("block", None) - test_node.send_message(msg_getdata( - [CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))])) - test_node.wait_until(lambda: "block" in test_node.last_message, - timeout=30) + test_node.send_message( + msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]) + ) + test_node.wait_until(lambda: "block" in test_node.last_message, timeout=30) with p2p_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( - test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) + test_node.last_message["block"].block.sha256, int(new_blocks[0], 16) + ) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height - 5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert found # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with p2p_lock: test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with p2p_lock: assert "blocktxn" not in test_node.last_message def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 10) [listener.clear_block_announcement() for listener in listeners] node.submitblock(ToHex(block)) for listener in listeners: - listener.wait_until(lambda: "cmpctblock" in listener.last_message, - timeout=30) + listener.wait_until( + lambda: "cmpctblock" in listener.last_message, timeout=30 + ) with p2p_lock: for listener in listeners: - listener.last_message["cmpctblock"].header_and_shortids.header.calc_sha256( - ) + listener.last_message[ + "cmpctblock" + ].header_and_shortids.header.calc_sha256() assert_equal( - listener.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) + listener.last_message[ + "cmpctblock" + ].header_and_shortids.header.sha256, + block.sha256, + ) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. 
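test_compactblocks_not_at_tip() above ends with the fingerprinting check: a getblocktxn for a block that is no longer near the tip must be silently ignored. A hypothetical convenience wrapper that repackages exactly those calls (all names except the wrapper itself come from the imports and patterns above):

# Hypothetical wrapper around the fingerprinting check above: request block
# transactions for a block the node should not serve via blocktxn and confirm
# that it stays silent instead of answering.
def assert_getblocktxn_ignored(peer, block_sha256):
    request = msg_getblocktxn()
    request.block_txn_request = BlockTransactionsRequest(block_sha256, [0])
    with p2p_lock:
        peer.last_message.pop("blocktxn", None)
    peer.send_and_ping(request)
    with p2p_lock:
        assert "blocktxn" not in peer.last_message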
def test_invalid_tx_in_compactblock(self, node, test_node): assert len(self.utxos) utxo = self.utxos[0] block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) block.vtx.remove(ordered_txs[3]) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4]) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert int(node.getbestblockhash(), 16) is not block.sha256 test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version=1): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) peer.send_and_ping(msg_sendcmpct(announce=True, version=version)) def test_compactblock_reconstruction_multiple_peers( - self, node, stalling_peer, delivery_peer): + self, node, stalling_peer, delivery_peer + ): assert len(self.utxos) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with p2p_lock: assert "getblocktxn" in peer.last_message return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert tx.hash in mempool delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) - self.utxos.append( - [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) + self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() # TODO: modify txhash in a way that doesn't impact txid. 
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) # Because txhash isn't modified, we end up reconstructing the same block # assert int(node.getbestblockhash(), 16) != block.sha256 msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_highbandwidth_mode_states_via_getpeerinfo(self): # create new p2p connection for a fresh state w/o any prior sendcmpct # messages sent hb_test_node = self.nodes[0].add_p2p_connection(TestP2PConn()) # assert the RPC getpeerinfo boolean fields `bip152_hb_{to, from}` # match the given parameters for the last peer of a given node def assert_highbandwidth_states(node, hb_to, hb_from): peerinfo = node.getpeerinfo()[-1] - assert_equal(peerinfo['bip152_hb_to'], hb_to) - assert_equal(peerinfo['bip152_hb_from'], hb_from) + assert_equal(peerinfo["bip152_hb_to"], hb_to) + assert_equal(peerinfo["bip152_hb_from"], hb_from) # initially, neither node has selected the other peer as high-bandwidth # yet assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=False) # peer requests high-bandwidth mode by sending sendcmpct(1) hb_test_node.send_and_ping(msg_sendcmpct(announce=True, version=1)) assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=True) # peer generates a block and sends it to node, which should # select the peer as high-bandwidth (up to 3 peers according to BIP # 152) block = self.build_block_on_tip(self.nodes[0]) hb_test_node.send_and_ping(msg_block(block)) assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=True) # peer requests low-bandwidth mode by sending sendcmpct(0) hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=1)) assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False) def run_test(self): # Get the nodes out of IBD self.generate(self.nodes[0], 1) # Setup the p2p connections self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn()) self.ex_softfork_node = self.nodes[1].add_p2p_connection( - TestP2PConn(), services=NODE_NETWORK) + TestP2PConn(), services=NODE_NETWORK + ) self.old_node = self.nodes[1].add_p2p_connection( - TestP2PConn(), services=NODE_NETWORK) + TestP2PConn(), services=NODE_NETWORK + ) # We will need UTXOs to construct transactions in later tests. self.make_utxos() self.log.info("Running tests:") self.log.info("\tTesting SENDCMPCT p2p message... ") self.test_sendcmpct(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_sendcmpct( - self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node) + self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node + ) self.sync_blocks() self.log.info("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node) self.sync_blocks() - self.test_compactblock_construction( - self.nodes[1], self.ex_softfork_node) + self.test_compactblock_construction(self.nodes[1], self.ex_softfork_node) self.sync_blocks() self.log.info("\tTesting compactblock requests... 
") self.test_compactblock_requests(self.nodes[0], self.test_node, 1) self.sync_blocks() - self.test_compactblock_requests( - self.nodes[1], self.ex_softfork_node, 2) + self.test_compactblock_requests(self.nodes[1], self.ex_softfork_node, 2) self.sync_blocks() self.log.info("\tTesting getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_getblocktxn_requests(self.nodes[1], self.ex_softfork_node, 2) self.sync_blocks() self.log.info("\tTesting getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_getblocktxn_handler(self.nodes[1], self.ex_softfork_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) self.sync_blocks() self.log.info( - "\tTesting compactblock requests/announcements not at chain tip...") + "\tTesting compactblock requests/announcements not at chain tip..." + ) self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) self.sync_blocks() - self.test_compactblocks_not_at_tip( - self.nodes[1], self.ex_softfork_node) + self.test_compactblocks_not_at_tip(self.nodes[1], self.ex_softfork_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) self.sync_blocks() self.log.info("\tTesting handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) self.sync_blocks() - self.test_incorrect_blocktxn_response( - self.nodes[1], self.ex_softfork_node, 2) + self.test_incorrect_blocktxn_response(self.nodes[1], self.ex_softfork_node, 2) self.sync_blocks() # End-to-end block relay tests self.log.info("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0]) self.request_cb_announcements(self.old_node, self.nodes[1]) - self.request_cb_announcements( - self.ex_softfork_node, self.nodes[1], version=2) + self.request_cb_announcements(self.ex_softfork_node, self.nodes[1], version=2) self.test_end_to_end_block_relay( - self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node]) + self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node] + ) self.test_end_to_end_block_relay( - self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node]) + self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node] + ) self.log.info("\tTesting handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) - self.test_invalid_tx_in_compactblock( - self.nodes[1], self.ex_softfork_node) + self.test_invalid_tx_in_compactblock(self.nodes[1], self.ex_softfork_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) - self.log.info( - "\tTesting reconstructing compact blocks from all peers...") + self.log.info("\tTesting reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers( - self.nodes[1], self.ex_softfork_node, self.old_node) + self.nodes[1], self.ex_softfork_node, self.old_node + ) self.sync_blocks() self.log.info("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() self.log.info("Testing high-bandwidth mode states via getpeerinfo...") self.test_highbandwidth_mode_states_via_getpeerinfo() -if __name__ == '__main__': +if __name__ == "__main__": CompactBlocksTest().main() diff --git a/test/functional/p2p_compactblocks_hb.py b/test/functional/p2p_compactblocks_hb.py index 380544a87..7c852aa43 100755 --- a/test/functional/p2p_compactblocks_hb.py +++ 
b/test/functional/p2p_compactblocks_hb.py @@ -1,96 +1,96 @@ #!/usr/bin/env python3 # Copyright (c) 2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test compact blocks HB selection logic.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class CompactBlocksConnectionTest(BitcoinTestFramework): """Test class for verifying selection of HB peer connections.""" def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 def peer_info(self, from_node, to_node): """Query from_node for its getpeerinfo about to_node.""" for peerinfo in self.nodes[from_node].getpeerinfo(): - if f"testnode-{to_node}" in peerinfo['subver']: + if f"testnode-{to_node}" in peerinfo["subver"]: return peerinfo return None def setup_network(self): self.setup_nodes() # Start network with everyone disconnected self.sync_all() def relay_block_through(self, peer): """Relay a new block through peer peer, and return HB status between 1 and [2,3,4,5].""" self.connect_nodes(peer, 0) self.generate(self.nodes[0], 1, sync_fun=self.sync_blocks) self.disconnect_nodes(peer, 0) - status_to = [self.peer_info(1, i)['bip152_hb_to'] for i in range(2, 6)] - status_from = [self.peer_info(i, 1)['bip152_hb_from'] for i in range(2, 6)] + status_to = [self.peer_info(1, i)["bip152_hb_to"] for i in range(2, 6)] + status_from = [self.peer_info(i, 1)["bip152_hb_from"] for i in range(2, 6)] assert_equal(status_to, status_from) return status_to def run_test(self): self.log.info("Testing reserved high-bandwidth mode slot for outbound peer...") # Connect everyone to node 0, and mine some blocks to get all nodes out of IBD. for i in range(1, 6): self.connect_nodes(i, 0) self.generate(self.nodes[0], 2, sync_fun=self.sync_blocks) for i in range(1, 6): self.disconnect_nodes(i, 0) # Construct network topology: # - Node 0 is the block producer # - Node 1 is the "target" node being tested # - Nodes 2-5 are intermediaries. # - Node 1 has an outbound connection to node 2 # - Node 1 has inbound connections from nodes 3-5 self.connect_nodes(3, 1) self.connect_nodes(4, 1) self.connect_nodes(5, 1) self.connect_nodes(1, 2) # Mine blocks subsequently relaying through nodes 3,4,5 (inbound to node 1) for nodeid in range(3, 6): status = self.relay_block_through(nodeid) assert_equal(status, [False, nodeid >= 3, nodeid >= 4, nodeid >= 5]) # And again through each. This should not change HB status. for nodeid in range(3, 6): status = self.relay_block_through(nodeid) assert_equal(status, [False, True, True, True]) # Now relay one block through peer 2 (outbound from node 1), so it should take # HB status from one of the inbounds. status = self.relay_block_through(2) assert_equal(status[0], True) assert_equal(sum(status), 3) # Now relay again through nodes 3,4,5. Since 2 is outbound, it should remain HB. for nodeid in range(3, 6): status = self.relay_block_through(nodeid) assert status[0] assert status[nodeid - 2] assert_equal(sum(status), 3) # Reconnect peer 2, and retry. Now the three inbounds should be HB again. 
self.disconnect_nodes(1, 2) self.connect_nodes(1, 2) for nodeid in range(3, 6): status = self.relay_block_through(nodeid) assert not status[0] assert status[nodeid - 2] assert_equal(status, [False, True, True, True]) -if __name__ == '__main__': +if __name__ == "__main__": CompactBlocksConnectionTest().main() diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py index 0580c3ac8..f9a053527 100755 --- a/test/functional/p2p_disconnect_ban.py +++ b/test/functional/p2p_disconnect_ban.py @@ -1,134 +1,142 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node disconnect and ban behavior""" import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class DisconnectBanTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.supports_cli = False def run_test(self): self.log.info("Connect nodes both way") self.connect_nodes(0, 1) self.connect_nodes(1, 0) self.log.info("Test setban and listbanned RPCs") self.log.info("setban: successfully ban single IP address") # node1 should have 2 connections to node0 at this point assert_equal(len(self.nodes[1].getpeerinfo()), 2) self.nodes[1].setban(subnet="127.0.0.1", command="add") - self.wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, - timeout=10) + self.wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10) # all nodes must be disconnected at this point assert_equal(len(self.nodes[1].getpeerinfo()), 0) assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("clearbanned: successfully clear ban list") self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].setban("127.0.0.0/24", "add") self.log.info("setban: fail to ban an already banned subnet") assert_equal(len(self.nodes[1].listbanned()), 1) assert_raises_rpc_error( - -23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add") + -23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add" + ) self.log.info("setban: fail to ban an invalid subnet") assert_raises_rpc_error( - -30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add") + -30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add" + ) # still only one banned ip because 127.0.0.1 is within the range of # 127.0.0.0/24 assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: fail to unban a non-banned subnet") assert_raises_rpc_error( - -30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove") + -30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove" + ) assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: successfully unban subnet") self.nodes[1].setban("127.0.0.0/24", "remove") assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.log.info("setban: test persistence across node restart") self.nodes[1].setban("127.0.0.0/32", "add") self.nodes[1].setban("127.0.0.0/24", "add") # Set the mocktime so we can control when bans expire old_time = int(time.time()) self.nodes[1].setmocktime(old_time) # ban for 1 seconds self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1000 seconds - self.nodes[1].setban( - "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 
1000) + self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) listBeforeShutdown = self.nodes[1].listbanned() - assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) + assert_equal("192.168.0.1/32", listBeforeShutdown[2]["address"]) # Move time forward by 3 seconds so the third ban has expired self.nodes[1].setmocktime(old_time + 3) assert_equal(len(self.nodes[1].listbanned()), 3) self.restart_node(1) listAfterShutdown = self.nodes[1].listbanned() - assert_equal("127.0.0.0/24", listAfterShutdown[0]['address']) - assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) - assert_equal("/19" in listAfterShutdown[2]['address'], True) + assert_equal("127.0.0.0/24", listAfterShutdown[0]["address"]) + assert_equal("127.0.0.0/32", listAfterShutdown[1]["address"]) + assert_equal("/19" in listAfterShutdown[2]["address"], True) # Clear ban lists self.nodes[1].clearbanned() self.log.info("Connect nodes both way") self.connect_nodes(0, 1) self.connect_nodes(1, 0) self.log.info("Test disconnectnode RPCs") self.log.info( - "disconnectnode: fail to disconnect when calling with address and nodeid") - address1 = self.nodes[0].getpeerinfo()[0]['addr'] - node1 = self.nodes[0].getpeerinfo()[0]['addr'] + "disconnectnode: fail to disconnect when calling with address and nodeid" + ) + address1 = self.nodes[0].getpeerinfo()[0]["addr"] + node1 = self.nodes[0].getpeerinfo()[0]["addr"] assert_raises_rpc_error( - -32602, "Only one of address and nodeid should be provided.", - self.nodes[0].disconnectnode, address=address1, nodeid=node1) + -32602, + "Only one of address and nodeid should be provided.", + self.nodes[0].disconnectnode, + address=address1, + nodeid=node1, + ) self.log.info( - "disconnectnode: fail to disconnect when calling with junk address") - assert_raises_rpc_error(-29, "Node not found in connected nodes", - self.nodes[0].disconnectnode, address="221B Baker Street") - - self.log.info( - "disconnectnode: successfully disconnect node by address") - address1 = self.nodes[0].getpeerinfo()[0]['addr'] + "disconnectnode: fail to disconnect when calling with junk address" + ) + assert_raises_rpc_error( + -29, + "Node not found in connected nodes", + self.nodes[0].disconnectnode, + address="221B Baker Street", + ) + + self.log.info("disconnectnode: successfully disconnect node by address") + address1 = self.nodes[0].getpeerinfo()[0]["addr"] self.nodes[0].disconnectnode(address=address1) - self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, - timeout=10) - assert not [node for node in self.nodes[0].getpeerinfo() - if node['addr'] == address1] + self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) + assert not [ + node for node in self.nodes[0].getpeerinfo() if node["addr"] == address1 + ] self.log.info("disconnectnode: successfully reconnect node") # reconnect the node self.connect_nodes(0, 1) assert_equal(len(self.nodes[0].getpeerinfo()), 2) - assert [node for node in self.nodes[0] - .getpeerinfo() if node['addr'] == address1] + assert [ + node for node in self.nodes[0].getpeerinfo() if node["addr"] == address1 + ] - self.log.info( - "disconnectnode: successfully disconnect node by node id") - id1 = self.nodes[0].getpeerinfo()[0]['id'] + self.log.info("disconnectnode: successfully disconnect node by node id") + id1 = self.nodes[0].getpeerinfo()[0]["id"] self.nodes[0].disconnectnode(nodeid=id1) - self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, - timeout=10) - assert not [node for node in self.nodes[0].getpeerinfo() - if 
node['id'] == id1] + self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) + assert not [node for node in self.nodes[0].getpeerinfo() if node["id"] == id1] -if __name__ == '__main__': +if __name__ == "__main__": DisconnectBanTest().main() diff --git a/test/functional/p2p_dos_header_tree.py b/test/functional/p2p_dos_header_tree.py index 46c7086b8..f1902f49c 100755 --- a/test/functional/p2p_dos_header_tree.py +++ b/test/functional/p2p_dos_header_tree.py @@ -1,92 +1,89 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test that we reject low difficulty headers to prevent our block tree from filling up with useless bloat""" import os from test_framework.messages import CBlockHeader, FromHex from test_framework.p2p import P2PInterface, msg_headers from test_framework.test_framework import BitcoinTestFramework class RejectLowDifficultyHeadersTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True - self.chain = 'testnet3' # Use testnet chain because it has an early checkpoint + self.chain = "testnet3" # Use testnet chain because it has an early checkpoint self.num_nodes = 2 def add_options(self, parser): parser.add_argument( - '--datafile', - default='data/blockheader_testnet3.hex', - help='Test data file (default: %(default)s)', + "--datafile", + default="data/blockheader_testnet3.hex", + help="Test data file (default: %(default)s)", ) def run_test(self): self.log.info("Read headers data") self.headers_file_path = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - self.options.datafile) - with open(self.headers_file_path, encoding='utf-8') as headers_data: + os.path.dirname(os.path.realpath(__file__)), self.options.datafile + ) + with open(self.headers_file_path, encoding="utf-8") as headers_data: h_lines = [line.strip() for line in headers_data.readlines()] # The headers data is taken from testnet3 for early blocks from genesis until the first checkpoint. There are # two headers with valid POW at height 1 and 2, forking off from # genesis. They are indicated by the FORK_PREFIX. 
- FORK_PREFIX = 'fork:' - self.headers = [ - line for line in h_lines if not line.startswith(FORK_PREFIX)] - self.headers_fork = [line[len(FORK_PREFIX):] - for line in h_lines if line.startswith(FORK_PREFIX)] + FORK_PREFIX = "fork:" + self.headers = [line for line in h_lines if not line.startswith(FORK_PREFIX)] + self.headers_fork = [ + line[len(FORK_PREFIX) :] for line in h_lines if line.startswith(FORK_PREFIX) + ] self.headers = [FromHex(CBlockHeader(), h) for h in self.headers] - self.headers_fork = [FromHex(CBlockHeader(), h) - for h in self.headers_fork] + self.headers_fork = [FromHex(CBlockHeader(), h) for h in self.headers_fork] self.log.info( - "Feed all non-fork headers, including and up to the first checkpoint") + "Feed all non-fork headers, including and up to the first checkpoint" + ) peer_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface()) peer_checkpoint.send_and_ping(msg_headers(self.headers)) assert { - 'height': 546, - 'hash': '000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70', - 'branchlen': 546, - 'status': 'headers-only', + "height": 546, + "hash": "000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70", + "branchlen": 546, + "status": "headers-only", } in self.nodes[0].getchaintips() self.log.info("Feed all fork headers (fails due to checkpoint)") - with self.nodes[0].assert_debug_log(['bad-fork-prior-to-checkpoint']): + with self.nodes[0].assert_debug_log(["bad-fork-prior-to-checkpoint"]): peer_checkpoint.send_message(msg_headers(self.headers_fork)) peer_checkpoint.wait_for_disconnect() self.log.info("Feed all fork headers (succeeds without checkpoint)") # On node 0 it succeeds because checkpoints are disabled - self.restart_node(0, ['-nocheckpoints']) + self.restart_node(0, ["-nocheckpoints"]) peer_no_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface()) peer_no_checkpoint.send_and_ping(msg_headers(self.headers_fork)) assert { "height": 2, "hash": "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39", "branchlen": 2, "status": "headers-only", } in self.nodes[0].getchaintips() # On node 1 it succeeds because no checkpoint has been reached yet by a # chain tip - peer_before_checkpoint = self.nodes[1].add_p2p_connection( - P2PInterface()) - peer_before_checkpoint.send_and_ping( - msg_headers(self.headers_fork)) + peer_before_checkpoint = self.nodes[1].add_p2p_connection(P2PInterface()) + peer_before_checkpoint.send_and_ping(msg_headers(self.headers_fork)) assert { "height": 2, "hash": "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39", "branchlen": 2, "status": "headers-only", } in self.nodes[1].getchaintips() -if __name__ == '__main__': +if __name__ == "__main__": RejectLowDifficultyHeadersTest().main() diff --git a/test/functional/p2p_eviction.py b/test/functional/p2p_eviction.py index 38376bad9..2b2943a6c 100755 --- a/test/functional/p2p_eviction.py +++ b/test/functional/p2p_eviction.py @@ -1,203 +1,211 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test node eviction logic When the number of peers has reached the limit of maximum connections, the next connecting inbound peer will trigger the eviction mechanism. We cannot currently test the parts of the eviction logic that are based on address/netgroup since in the current framework, all peers are connecting from the same local address. See Issue #14210 for more info. 
Therefore, this test is limited to the remaining protection criteria. """ import time from test_framework.avatools import ( AvaP2PInterface, avalanche_proof_from_hex, create_coinbase_stakes, ) from test_framework.blocktools import create_block, create_coinbase from test_framework.key import ECKey from test_framework.messages import ( CTransaction, FromHex, msg_avaproof, msg_pong, msg_tx, ) from test_framework.p2p import P2PDataStore, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal from test_framework.wallet_util import bytes_to_wif class SlowP2PDataStore(P2PDataStore): def on_ping(self, message): time.sleep(0.1) self.send_message(msg_pong(message.nonce)) class SlowP2PInterface(P2PInterface): def on_ping(self, message): time.sleep(0.1) self.send_message(msg_pong(message.nonce)) class SlowAvaP2PInterface(AvaP2PInterface): def on_ping(self, message): time.sleep(0.1) self.send_message(msg_pong(message.nonce)) class P2PEvict(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # The choice of maxconnections=188 results in a maximum of 153 inbound # connections (188 - 34 outbound - 1 feeler). The 34 outbounds count is # from 16 full-relay + 16 avalanche + 2 block-only-relay. # 152 inbound peers are protected from eviction: # 4 by netgroup, 4 that sent us blocks, 4 that sent us proofs, 4 that # sent us transactions, 8 via lowest ping time, 128 with the best # avalanche availability score - self.extra_args = [["-maxconnections=188", - "-avaproofstakeutxodustthreshold=1000000", - "-avaproofstakeutxoconfirmations=1", - "-maxavalancheoutbound=16"]] + self.extra_args = [ + [ + "-maxconnections=188", + "-avaproofstakeutxodustthreshold=1000000", + "-avaproofstakeutxoconfirmations=1", + "-maxavalancheoutbound=16", + ] + ] def run_test(self): # peers that we expect to be protected from eviction protected_peers = set() current_peer = -1 node = self.nodes[0] - blocks = self.generatetoaddress(node, - 101, node.get_deterministic_priv_key().address) + blocks = self.generatetoaddress( + node, 101, node.get_deterministic_priv_key().address + ) self.log.info( - "Create 4 peers and protect them from eviction by sending us a block") + "Create 4 peers and protect them from eviction by sending us a block" + ) for _ in range(4): block_peer = node.add_p2p_connection(SlowP2PDataStore()) current_peer += 1 block_peer.sync_with_ping() best_block = node.getbestblockhash() tip = int(best_block, 16) - best_block_time = node.getblock(best_block)['time'] + best_block_time = node.getblock(best_block)["time"] block = create_block( - tip, - create_coinbase( - node.getblockcount() + 1), - best_block_time + 1) + tip, create_coinbase(node.getblockcount() + 1), best_block_time + 1 + ) block.solve() block_peer.send_blocks_and_test([block], node, success=True) protected_peers.add(current_peer) self.log.info( - "Create 4 peers and protect them from eviction by sending us a proof") + "Create 4 peers and protect them from eviction by sending us a proof" + ) privkey = ECKey() privkey.generate() wif_privkey = bytes_to_wif(privkey.get_bytes()) stakes = create_coinbase_stakes( - node, blocks, node.get_deterministic_priv_key().key) + node, blocks, node.get_deterministic_priv_key().key + ) for i in range(4): proof_peer = node.add_p2p_connection(SlowP2PDataStore()) current_peer += 1 proof_peer.sync_with_ping() - proof = node.buildavalancheproof( - 42, 2000000000, wif_privkey, [stakes[i]]) + proof = node.buildavalancheproof(42, 
2000000000, wif_privkey, [stakes[i]]) avaproof_msg = msg_avaproof() avaproof_msg.proof = avalanche_proof_from_hex(proof) proof_peer.send_message(avaproof_msg) protected_peers.add(current_peer) - self.log.info( - "Create 5 slow-pinging peers, making them eviction candidates") + self.log.info("Create 5 slow-pinging peers, making them eviction candidates") for _ in range(5): node.add_p2p_connection(SlowP2PInterface()) current_peer += 1 self.log.info( - "Create 4 peers and protect them from eviction by sending us a tx") + "Create 4 peers and protect them from eviction by sending us a tx" + ) for i in range(4): txpeer = node.add_p2p_connection(SlowP2PInterface()) current_peer += 1 txpeer.sync_with_ping() - prevtx = node.getblock(node.getblockhash(i + 1), 2)['tx'][0] + prevtx = node.getblock(node.getblockhash(i + 1), 2)["tx"][0] rawtx = node.createrawtransaction( - inputs=[{'txid': prevtx['txid'], 'vout': 0}], + inputs=[{"txid": prevtx["txid"], "vout": 0}], outputs=[ - {node.get_deterministic_priv_key().address: 50000000 - 1250.00}], + {node.get_deterministic_priv_key().address: 50000000 - 1250.00} + ], ) sigtx = node.signrawtransactionwithkey( hexstring=rawtx, privkeys=[node.get_deterministic_priv_key().key], - prevtxs=[{ - 'txid': prevtx['txid'], - 'vout': 0, - 'amount': prevtx['vout'][0]['value'], - 'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'], - }], - )['hex'] + prevtxs=[ + { + "txid": prevtx["txid"], + "vout": 0, + "amount": prevtx["vout"][0]["value"], + "scriptPubKey": prevtx["vout"][0]["scriptPubKey"]["hex"], + } + ], + )["hex"] txpeer.send_message(msg_tx(FromHex(CTransaction(), sigtx))) protected_peers.add(current_peer) self.log.info( - "Create 8 peers and protect them from eviction by having faster pings") + "Create 8 peers and protect them from eviction by having faster pings" + ) for _ in range(8): fastpeer = node.add_p2p_connection(P2PInterface()) current_peer += 1 - self.wait_until(lambda: "ping" in fastpeer.last_message, - timeout=10) + self.wait_until(lambda: "ping" in fastpeer.last_message, timeout=10) self.log.info( - "Create 128 peers and protect them from eviction by sending an avahello message") + "Create 128 peers and protect them from eviction by sending an avahello" + " message" + ) for _ in range(128): node.add_p2p_connection(SlowAvaP2PInterface()) current_peer += 1 # Make sure by asking the node what the actual min pings are peerinfo = node.getpeerinfo() pings = {} for i in range(len(peerinfo)): - pings[i] = peerinfo[i]['minping'] if 'minping' in peerinfo[i] else 1000000 + pings[i] = peerinfo[i]["minping"] if "minping" in peerinfo[i] else 1000000 sorted_pings = sorted(pings.items(), key=lambda x: x[1]) # Usually the 8 fast peers are protected. In rare case of unreliable pings, # one of the slower peers might have a faster min ping though. for i in range(8): protected_peers.add(sorted_pings[i][0]) self.log.info("Create peer that triggers the eviction mechanism") node.add_p2p_connection(SlowP2PInterface()) # One of the non-protected peers must be evicted. We can't be sure which one because # 4 peers are protected via netgroup, which is identical for all peers, # and the eviction mechanism doesn't preserve the order of identical # elements. 
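# Aside (my illustration, not part of the patch): how the connection budget
# described in the comment at the top of this test adds up. All numbers are
# taken from that comment; only the variable names below are mine.
MAX_CONNECTIONS = 188
OUTBOUND = 16 + 16 + 2  # full-relay + avalanche + block-relay-only
FEELER = 1
inbound_slots = MAX_CONNECTIONS - OUTBOUND - FEELER  # 153
protected_slots = 4 + 4 + 4 + 4 + 8 + 128  # netgroup/blocks/proofs/txs/ping/avalanche
assert inbound_slots == 153 and protected_slots == 152
# With 152 of the 153 inbound slots protected, the final SlowP2PInterface
# connection above forces exactly one eviction, which is what the
# assert_equal(len(evicted_peers), 1) check below verifies.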
evicted_peers = [] for i in range(len(node.p2ps)): if not node.p2ps[i].is_connected: evicted_peers.append(i) self.log.info("Test that one peer was evicted") - self.log.debug( - f"{len(evicted_peers)} evicted peer: {set(evicted_peers)}") + self.log.debug(f"{len(evicted_peers)} evicted peer: {set(evicted_peers)}") assert_equal(len(evicted_peers), 1) self.log.info("Test that no peer expected to be protected was evicted") - self.log.debug( - f"{len(protected_peers)} protected peers: {protected_peers}") + self.log.debug(f"{len(protected_peers)} protected peers: {protected_peers}") assert evicted_peers[0] not in protected_peers -if __name__ == '__main__': +if __name__ == "__main__": P2PEvict().main() diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index 5da7f8f60..1a511bfa7 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -1,161 +1,181 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of feefilter messages.""" from decimal import Decimal from test_framework.messages import MSG_TX, msg_feefilter from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, uint256_hex from test_framework.wallet import MiniWallet class FeefilterConn(P2PInterface): feefilter_received = False def on_feefilter(self, message): self.feefilter_received = True def assert_feefilter_received(self, recv: bool): with p2p_lock: assert_equal(self.feefilter_received, recv) class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.txinvs = [] def on_inv(self, message): for i in message.inv: - if (i.type == MSG_TX): + if i.type == MSG_TX: self.txinvs.append(uint256_hex(i.hash)) def wait_for_invs_to_match(self, invs_expected): invs_expected.sort() self.wait_until(lambda: invs_expected == sorted(self.txinvs)) def clear_invs(self): with p2p_lock: self.txinvs = [] class FeeFilterTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 # We lower the various required feerates for this test # to catch a corner-case where feefilter used to slightly undercut # mempool and wallet feerate calculation based on GetFee # rounding down 3 places, leading to stranded transactions. 
# See issue #16499 # grant noban permission to all peers to speed up tx relay / mempool # sync - self.extra_args = [[ - "-minrelaytxfee=1", - "-mintxfee=1", - "-whitelist=noban@127.0.0.1", - ]] * self.num_nodes + self.extra_args = [ + [ + "-minrelaytxfee=1", + "-mintxfee=1", + "-whitelist=noban@127.0.0.1", + ] + ] * self.num_nodes def run_test(self): self.test_feefilter_forcerelay() self.test_feefilter() self.test_feefilter_blocksonly() def test_feefilter_forcerelay(self): self.log.info( - 'Check that peers without forcerelay permission (default) get a feefilter message') - self.nodes[0].add_p2p_connection( - FeefilterConn()).assert_feefilter_received(True) + "Check that peers without forcerelay permission (default) get a feefilter" + " message" + ) + self.nodes[0].add_p2p_connection(FeefilterConn()).assert_feefilter_received( + True + ) self.log.info( - 'Check that peers with forcerelay permission do not get a feefilter message') - self.restart_node(0, extra_args=['-whitelist=forcerelay@127.0.0.1']) - self.nodes[0].add_p2p_connection( - FeefilterConn()).assert_feefilter_received(False) + "Check that peers with forcerelay permission do not get a feefilter message" + ) + self.restart_node(0, extra_args=["-whitelist=forcerelay@127.0.0.1"]) + self.nodes[0].add_p2p_connection(FeefilterConn()).assert_feefilter_received( + False + ) # Restart to disconnect peers and load default extra_args self.restart_node(0) self.connect_nodes(1, 0) def test_feefilter(self): node1 = self.nodes[1] node0 = self.nodes[0] miniwallet = MiniWallet(node1) # Add enough mature utxos to the wallet, so that all txs spend # confirmed coins self.generate(miniwallet, 5) self.generate(node1, 100) conn = self.nodes[0].add_p2p_connection(TestP2PConn()) - self.log.info( - "Test txs paying 0.2 sat/byte are received by test connection") - txids = [miniwallet.send_self_transfer(fee_rate=Decimal('2.00'), - from_node=node1)['txid'] - for _ in range(3)] + self.log.info("Test txs paying 0.2 sat/byte are received by test connection") + txids = [ + miniwallet.send_self_transfer(fee_rate=Decimal("2.00"), from_node=node1)[ + "txid" + ] + for _ in range(3) + ] conn.wait_for_invs_to_match(txids) conn.clear_invs() # Set a fee filter of 0.15 sat/byte on test connection conn.send_and_ping(msg_feefilter(150)) - self.log.info( - "Test txs paying 0.15 sat/byte are received by test connection") - txids = [miniwallet.send_self_transfer(fee_rate=Decimal('1.50'), - from_node=node1)['txid'] - for _ in range(3)] + self.log.info("Test txs paying 0.15 sat/byte are received by test connection") + txids = [ + miniwallet.send_self_transfer(fee_rate=Decimal("1.50"), from_node=node1)[ + "txid" + ] + for _ in range(3) + ] conn.wait_for_invs_to_match(txids) conn.clear_invs() self.log.info( - "Test txs paying 0.1 sat/byte are no longer received by test connection") - txids = [miniwallet.send_self_transfer(fee_rate=Decimal('1.00'), - from_node=node1)['txid'] - for _ in range(3)] + "Test txs paying 0.1 sat/byte are no longer received by test connection" + ) + txids = [ + miniwallet.send_self_transfer(fee_rate=Decimal("1.00"), from_node=node1)[ + "txid" + ] + for _ in range(3) + ] self.sync_mempools() # must be sure node 0 has received all txs # Send one transaction from node0 that should be received, so that we # we can sync the test on receipt (if node1's txs were relayed, they'd # be received by the time this node0 tx is received). 
This is # unfortunately reliant on the current relay behavior where we batch up # to 35 entries in an inv, which means that when this next transaction # is eligible for relay, the prior transactions from node1 are eligible # as well. - txids = [miniwallet.send_self_transfer(fee_rate=Decimal('200.00'), - from_node=node0)['txid']] + txids = [ + miniwallet.send_self_transfer(fee_rate=Decimal("200.00"), from_node=node0)[ + "txid" + ] + ] conn.wait_for_invs_to_match(txids) conn.clear_invs() # must be sure node 1 has received all txs self.sync_mempools() self.log.info("Remove fee filter and check txs are received again") conn.send_and_ping(msg_feefilter(0)) - txids = [miniwallet.send_self_transfer(fee_rate=Decimal('200.00'), - from_node=node1)['txid'] - for _ in range(3)] + txids = [ + miniwallet.send_self_transfer(fee_rate=Decimal("200.00"), from_node=node1)[ + "txid" + ] + for _ in range(3) + ] conn.wait_for_invs_to_match(txids) conn.clear_invs() def test_feefilter_blocksonly(self): """Test that we don't send fee filters to block-relay-only peers and when we're in blocksonly mode.""" - self.log.info( - "Check that we don't send fee filters to block-relay-only peers.") + self.log.info("Check that we don't send fee filters to block-relay-only peers.") feefilter_peer = self.nodes[0].add_outbound_p2p_connection( - FeefilterConn(), p2p_idx=0, connection_type="block-relay-only") + FeefilterConn(), p2p_idx=0, connection_type="block-relay-only" + ) feefilter_peer.sync_with_ping() feefilter_peer.assert_feefilter_received(False) - self.log.info( - "Check that we don't send fee filters when in blocksonly mode.") + self.log.info("Check that we don't send fee filters when in blocksonly mode.") self.restart_node(0, ["-blocksonly"]) feefilter_peer = self.nodes[0].add_p2p_connection(FeefilterConn()) feefilter_peer.sync_with_ping() feefilter_peer.assert_feefilter_received(False) -if __name__ == '__main__': +if __name__ == "__main__": FeeFilterTest().main() diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py index 28d6c3b7c..fdea8605c 100755 --- a/test/functional/p2p_filter.py +++ b/test/functional/p2p_filter.py @@ -1,292 +1,316 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
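# Aside (my sketch, not part of the patch): the P2PBloomFilter class defined in
# this file hard-codes watch_filter_init for n=10 elements at fp=0.000001 with
# nHashFuncs=19. As a rough sanity check, the BIP 37 sizing formulas (with the
# usual integer truncation) land on roughly a 35-byte filter with 19 hash
# functions; the variable names here are mine.
import math

n_elements, fp_rate = 10, 0.000001
filter_bits = int(-1 / math.log(2) ** 2 * n_elements * math.log(fp_rate))  # ~287
filter_bytes = filter_bits // 8  # 35
n_hash_funcs = int(filter_bytes * 8 / n_elements * math.log(2))  # 19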
""" Test BIP 37 """ from test_framework.messages import ( COIN, MAX_BLOOM_FILTER_SIZE, MAX_BLOOM_HASH_FUNCS, MSG_BLOCK, MSG_FILTERED_BLOCK, CInv, msg_filteradd, msg_filterclear, msg_filterload, msg_getdata, msg_mempool, msg_version, ) from test_framework.p2p import ( P2P_SERVICES, P2P_SUBVERSION, P2P_VERSION, P2PInterface, p2p_lock, ) from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE from test_framework.test_framework import BitcoinTestFramework from test_framework.wallet import MiniWallet, getnewdestination class P2PBloomFilter(P2PInterface): # This is a P2SH watch-only wallet watch_script_pubkey = bytes.fromhex( - 'a914ffffffffffffffffffffffffffffffffffffffff87') + "a914ffffffffffffffffffffffffffffffffffffffff87" + ) # The initial filter (n=10, fp=0.000001) with just the above scriptPubKey # added watch_filter_init = msg_filterload( - data=b'@\x00\x08\x00\x80\x00\x00 \x00\xc0\x00 \x04\x00\x08$\x00\x04\x80\x00\x00 \x00\x00\x00\x00\x80\x00\x00@\x00\x02@ \x00', + data=( + b"@\x00\x08\x00\x80\x00\x00 \x00\xc0\x00 \x04\x00\x08$\x00\x04\x80\x00\x00" + b" \x00\x00\x00\x00\x80\x00\x00@\x00\x02@ \x00" + ), nHashFuncs=19, nTweak=0, nFlags=1, ) def __init__(self): super().__init__() self._tx_received = False self._merkleblock_received = False def on_inv(self, message): want = msg_getdata() for i in message.inv: # inv messages can only contain TX or BLOCK, so translate BLOCK to # FILTERED_BLOCK if i.type == MSG_BLOCK: want.inv.append(CInv(MSG_FILTERED_BLOCK, i.hash)) else: want.inv.append(i) if len(want.inv): self.send_message(want) def on_merkleblock(self, message): self._merkleblock_received = True def on_tx(self, message): self._tx_received = True @property def tx_received(self): with p2p_lock: return self._tx_received @tx_received.setter def tx_received(self, value): with p2p_lock: self._tx_received = value @property def merkleblock_received(self): with p2p_lock: return self._merkleblock_received @merkleblock_received.setter def merkleblock_received(self, value): with p2p_lock: self._merkleblock_received = value class FilterTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.extra_args = [[ - '-peerbloomfilters', - '-whitelist=noban@127.0.0.1', # immediate tx relay - ]] + self.extra_args = [ + [ + "-peerbloomfilters", + "-whitelist=noban@127.0.0.1", # immediate tx relay + ] + ] def generatetoscriptpubkey(self, scriptpubkey): """Helper to generate a single block to the given scriptPubKey.""" return self.generatetodescriptor( - self.nodes[0], 1, f'raw({scriptpubkey.hex()})')[0] + self.nodes[0], 1, f"raw({scriptpubkey.hex()})" + )[0] def test_size_limits(self, filter_peer): - self.log.info('Check that too large filter is rejected') - with self.nodes[0].assert_debug_log(['Misbehaving']): - filter_peer.send_and_ping(msg_filterload( - data=b'\xbb' * (MAX_BLOOM_FILTER_SIZE + 1))) + self.log.info("Check that too large filter is rejected") + with self.nodes[0].assert_debug_log(["Misbehaving"]): + filter_peer.send_and_ping( + msg_filterload(data=b"\xbb" * (MAX_BLOOM_FILTER_SIZE + 1)) + ) - self.log.info('Check that max size filter is accepted') - with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): + self.log.info("Check that max size filter is accepted") + with self.nodes[0].assert_debug_log([""], unexpected_msgs=["Misbehaving"]): filter_peer.send_and_ping( - msg_filterload( - data=b'\xbb' * - (MAX_BLOOM_FILTER_SIZE))) + msg_filterload(data=b"\xbb" * (MAX_BLOOM_FILTER_SIZE)) + ) filter_peer.send_and_ping(msg_filterclear()) - self.log.info( - 
'Check that filter with too many hash functions is rejected') - with self.nodes[0].assert_debug_log(['Misbehaving']): + self.log.info("Check that filter with too many hash functions is rejected") + with self.nodes[0].assert_debug_log(["Misbehaving"]): filter_peer.send_and_ping( - msg_filterload( - data=b'\xaa', - nHashFuncs=MAX_BLOOM_HASH_FUNCS + 1)) + msg_filterload(data=b"\xaa", nHashFuncs=MAX_BLOOM_HASH_FUNCS + 1) + ) - self.log.info('Check that filter with max hash functions is accepted') - with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): + self.log.info("Check that filter with max hash functions is accepted") + with self.nodes[0].assert_debug_log([""], unexpected_msgs=["Misbehaving"]): filter_peer.send_and_ping( - msg_filterload( - data=b'\xaa', - nHashFuncs=MAX_BLOOM_HASH_FUNCS)) + msg_filterload(data=b"\xaa", nHashFuncs=MAX_BLOOM_HASH_FUNCS) + ) # Don't send filterclear until next two filteradd checks are done self.log.info( - 'Check that max size data element to add to the filter is accepted') - with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): + "Check that max size data element to add to the filter is accepted" + ) + with self.nodes[0].assert_debug_log([""], unexpected_msgs=["Misbehaving"]): filter_peer.send_and_ping( - msg_filteradd( - data=b'\xcc' * - (MAX_SCRIPT_ELEMENT_SIZE))) + msg_filteradd(data=b"\xcc" * (MAX_SCRIPT_ELEMENT_SIZE)) + ) self.log.info( - 'Check that too large data element to add to the filter is rejected') - with self.nodes[0].assert_debug_log(['Misbehaving']): - filter_peer.send_and_ping(msg_filteradd( - data=b'\xcc' * (MAX_SCRIPT_ELEMENT_SIZE + 1))) + "Check that too large data element to add to the filter is rejected" + ) + with self.nodes[0].assert_debug_log(["Misbehaving"]): + filter_peer.send_and_ping( + msg_filteradd(data=b"\xcc" * (MAX_SCRIPT_ELEMENT_SIZE + 1)) + ) filter_peer.send_and_ping(msg_filterclear()) def test_msg_mempool(self): self.log.info( - "Check that a node with bloom filters enabled services p2p mempool messages") + "Check that a node with bloom filters enabled services p2p mempool messages" + ) filter_peer = P2PBloomFilter() self.log.debug("Create a tx relevant to the peer before connecting") txid, _ = self.wallet.send_to( - from_node=self.nodes[0], scriptPubKey=filter_peer.watch_script_pubkey, amount=9 * COIN) + from_node=self.nodes[0], + scriptPubKey=filter_peer.watch_script_pubkey, + amount=9 * COIN, + ) self.log.debug( - "Send a mempool msg after connecting and check that the tx is received") + "Send a mempool msg after connecting and check that the tx is received" + ) self.nodes[0].add_p2p_connection(filter_peer) filter_peer.send_and_ping(filter_peer.watch_filter_init) filter_peer.send_message(msg_mempool()) filter_peer.wait_for_tx(txid) def test_frelay_false(self, filter_peer): self.log.info( - "Check that a node with fRelay set to false does not receive invs until the filter is set") + "Check that a node with fRelay set to false does not receive invs until the" + " filter is set" + ) filter_peer.tx_received = False self.wallet.send_to( from_node=self.nodes[0], scriptPubKey=filter_peer.watch_script_pubkey, - amount=9 * COIN) + amount=9 * COIN, + ) # Sync to make sure the reason filter_peer doesn't receive the tx is # not p2p delays filter_peer.sync_with_ping() assert not filter_peer.tx_received # Clear the mempool so that this transaction does not impact subsequent # tests self.generate(self.nodes[0], 1) def test_filter(self, filter_peer): # Set the bloomfilter using 
filterload filter_peer.send_and_ping(filter_peer.watch_filter_init) # If fRelay is not already True, sending filterload sets it to True - assert self.nodes[0].getpeerinfo()[0]['relaytxes'] + assert self.nodes[0].getpeerinfo()[0]["relaytxes"] self.log.info( - 'Check that we receive merkleblock and tx if the filter matches a tx in a block') - block_hash = self.generatetoscriptpubkey( - filter_peer.watch_script_pubkey) - txid = self.nodes[0].getblock(block_hash)['tx'][0] + "Check that we receive merkleblock and tx if the filter matches a tx in a" + " block" + ) + block_hash = self.generatetoscriptpubkey(filter_peer.watch_script_pubkey) + txid = self.nodes[0].getblock(block_hash)["tx"][0] filter_peer.wait_for_merkleblock(block_hash) filter_peer.wait_for_tx(txid) self.log.info( - 'Check that we only receive a merkleblock if the filter does not match a tx in a block') + "Check that we only receive a merkleblock if the filter does not match a tx" + " in a block" + ) filter_peer.tx_received = False block_hash = self.generatetoscriptpubkey(getnewdestination()[1]) filter_peer.wait_for_merkleblock(block_hash) assert not filter_peer.tx_received self.log.info( - 'Check that we not receive a tx if the filter does not match a mempool tx') + "Check that we not receive a tx if the filter does not match a mempool tx" + ) filter_peer.merkleblock_received = False filter_peer.tx_received = False self.wallet.send_to( from_node=self.nodes[0], scriptPubKey=getnewdestination()[1], - amount=7 * COIN) + amount=7 * COIN, + ) filter_peer.sync_send_with_ping() assert not filter_peer.merkleblock_received assert not filter_peer.tx_received - self.log.info( - 'Check that we receive a tx if the filter matches a mempool tx') + self.log.info("Check that we receive a tx if the filter matches a mempool tx") filter_peer.merkleblock_received = False txid, _ = self.wallet.send_to( - from_node=self.nodes[0], scriptPubKey=filter_peer.watch_script_pubkey, amount=9 * COIN) + from_node=self.nodes[0], + scriptPubKey=filter_peer.watch_script_pubkey, + amount=9 * COIN, + ) filter_peer.wait_for_tx(txid) assert not filter_peer.merkleblock_received - self.log.info( - 'Check that after deleting filter all txs get relayed again') + self.log.info("Check that after deleting filter all txs get relayed again") filter_peer.send_and_ping(msg_filterclear()) for _ in range(5): txid, _ = self.wallet.send_to( - from_node=self.nodes[0], scriptPubKey=getnewdestination()[1], amount=7 * COIN) + from_node=self.nodes[0], + scriptPubKey=getnewdestination()[1], + amount=7 * COIN, + ) filter_peer.wait_for_tx(txid) self.log.info( - 'Check that request for filtered blocks is ignored if no filter' - ' is set') + "Check that request for filtered blocks is ignored if no filter is set" + ) filter_peer.merkleblock_received = False filter_peer.tx_received = False - with self.nodes[0].assert_debug_log(expected_msgs=['received getdata']): + with self.nodes[0].assert_debug_log(expected_msgs=["received getdata"]): block_hash = self.generatetoscriptpubkey(getnewdestination()[1]) filter_peer.wait_for_inv([CInv(MSG_BLOCK, int(block_hash, 16))]) filter_peer.sync_with_ping() assert not filter_peer.merkleblock_received assert not filter_peer.tx_received self.log.info( 'Check that sending "filteradd" if no filter is set is treated as ' - 'misbehavior') - with self.nodes[0].assert_debug_log(['Misbehaving']): - filter_peer.send_and_ping(msg_filteradd(data=b'letsmisbehave')) + "misbehavior" + ) + with self.nodes[0].assert_debug_log(["Misbehaving"]): + 
filter_peer.send_and_ping(msg_filteradd(data=b"letsmisbehave")) self.log.info( - "Check that division-by-zero remote crash bug [CVE-2013-5700] is fixed") - filter_peer.send_and_ping(msg_filterload(data=b'', nHashFuncs=1)) - filter_peer.send_and_ping( - msg_filteradd( - data=b'letstrytocrashthisnode')) + "Check that division-by-zero remote crash bug [CVE-2013-5700] is fixed" + ) + filter_peer.send_and_ping(msg_filterload(data=b"", nHashFuncs=1)) + filter_peer.send_and_ping(msg_filteradd(data=b"letstrytocrashthisnode")) self.nodes[0].disconnect_p2ps() def run_test(self): self.wallet = MiniWallet(self.nodes[0]) self.wallet.rescan_utxos() filter_peer = self.nodes[0].add_p2p_connection(P2PBloomFilter()) - self.log.info('Test filter size limits') + self.log.info("Test filter size limits") self.test_size_limits(filter_peer) - self.log.info('Test BIP 37 for a node with fRelay = True (default)') + self.log.info("Test BIP 37 for a node with fRelay = True (default)") self.test_filter(filter_peer) self.nodes[0].disconnect_p2ps() - self.log.info('Test BIP 37 for a node with fRelay = False') + self.log.info("Test BIP 37 for a node with fRelay = False") # Add peer but do not send version yet filter_peer_without_nrelay = self.nodes[0].add_p2p_connection( - P2PBloomFilter(), send_version=False, wait_for_verack=False) + P2PBloomFilter(), send_version=False, wait_for_verack=False + ) # Send version with relay=False version_without_fRelay = msg_version() version_without_fRelay.nVersion = P2P_VERSION version_without_fRelay.strSubVer = P2P_SUBVERSION version_without_fRelay.nServices = P2P_SERVICES version_without_fRelay.relay = 0 filter_peer_without_nrelay.send_message(version_without_fRelay) filter_peer_without_nrelay.wait_for_verack() - assert not self.nodes[0].getpeerinfo()[0]['relaytxes'] + assert not self.nodes[0].getpeerinfo()[0]["relaytxes"] self.test_frelay_false(filter_peer_without_nrelay) self.test_filter(filter_peer_without_nrelay) self.test_msg_mempool() -if __name__ == '__main__': +if __name__ == "__main__": FilterTest().main() diff --git a/test/functional/p2p_fingerprint.py b/test/functional/p2p_fingerprint.py index d610de89b..beb34d902 100755 --- a/test/functional/p2p_fingerprint.py +++ b/test/functional/p2p_fingerprint.py @@ -1,134 +1,139 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test various fingerprinting protections. If a stale block more than a month old or its header are requested by a peer, the node should pretend that it does not have it to avoid fingerprinting. 
""" import time from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( MSG_BLOCK, CInv, msg_block, msg_getdata, msg_getheaders, msg_headers, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class P2PFingerprintTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # Build a chain of blocks on top of given one def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time): blocks = [] for _ in range(nblocks): coinbase = create_coinbase(prev_height + 1) block_time = prev_median_time + 1 block = create_block(int(prev_hash, 16), coinbase, block_time) block.solve() blocks.append(block) prev_hash = block.hash prev_height += 1 prev_median_time = block_time return blocks # Send a getdata request for a given block hash def send_block_request(self, block_hash, node): msg = msg_getdata() msg.inv.append(CInv(MSG_BLOCK, block_hash)) node.send_message(msg) # Send a getheaders request for a given single block hash def send_header_request(self, block_hash, node): msg = msg_getheaders() msg.hashstop = block_hash node.send_message(msg) # Checks that stale blocks timestamped more than a month ago are not served # by the node while recent stale blocks and old active chain blocks are. # This does not currently test that stale blocks timestamped within the # last month but that have over a month's worth of work are also withheld. def run_test(self): node0 = self.nodes[0].add_p2p_connection(P2PInterface()) # Set node time to 60 days ago self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60) # Generating a chain of 10 blocks - block_hashes = self.generatetoaddress(self.nodes[0], - 10, self.nodes[0].get_deterministic_priv_key().address) + block_hashes = self.generatetoaddress( + self.nodes[0], 10, self.nodes[0].get_deterministic_priv_key().address + ) # Create longer chain starting 2 blocks before current tip height = len(block_hashes) - 2 block_hash = block_hashes[height - 1] block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1 new_blocks = self.build_chain(5, block_hash, height, block_time) # Force reorg to a longer chain node0.send_message(msg_headers(new_blocks)) node0.wait_for_getdata([x.sha256 for x in new_blocks]) for block in new_blocks: node0.send_and_ping(msg_block(block)) # Check that reorg succeeded self.nodes[0].unparkblock(new_blocks[-1].hash) assert_equal(self.nodes[0].getblockcount(), 13) stale_hash = int(block_hashes[-1], 16) # Check that getdata request for stale block succeeds self.send_block_request(stale_hash, node0) node0.wait_for_block(stale_hash, timeout=3) # Check that getheader request for stale block header succeeds self.send_header_request(stale_hash, node0) node0.wait_for_header(hex(stale_hash), timeout=3) # Longest chain is extended so stale is much older than chain tip self.nodes[0].setmocktime(0) - block_hash = int(self.generatetoaddress(self.nodes[0], - 1, self.nodes[0].get_deterministic_priv_key().address)[-1], 16) + block_hash = int( + self.generatetoaddress( + self.nodes[0], 1, self.nodes[0].get_deterministic_priv_key().address + )[-1], + 16, + ) assert_equal(self.nodes[0].getblockcount(), 14) node0.wait_for_block(block_hash, timeout=3) # Request for very old stale block should now fail with p2p_lock: node0.last_message.pop("block", None) self.send_block_request(stale_hash, node0) node0.sync_with_ping() assert "block" not in 
node0.last_message # Request for very old stale block header should now fail with p2p_lock: node0.last_message.pop("headers", None) self.send_header_request(stale_hash, node0) node0.sync_with_ping() assert "headers" not in node0.last_message # Verify we can fetch very old blocks and headers on the active chain block_hash = int(block_hashes[2], 16) self.send_block_request(block_hash, node0) self.send_header_request(block_hash, node0) node0.sync_with_ping() self.send_block_request(block_hash, node0) node0.wait_for_block(block_hash, timeout=3) self.send_header_request(block_hash, node0) node0.wait_for_header(hex(block_hash), timeout=3) -if __name__ == '__main__': +if __name__ == "__main__": P2PFingerprintTest().main() diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py index ac15b4c77..909d01772 100755 --- a/test/functional/p2p_getaddr_caching.py +++ b/test/functional/p2p_getaddr_caching.py @@ -1,92 +1,91 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test addr response caching""" import time from test_framework.messages import msg_getaddr from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal # As defined in net_processing. MAX_ADDR_TO_SEND = 1000 MAX_PCT_ADDR_TO_SEND = 23 class AddrReceiver(P2PInterface): - def __init__(self): super().__init__() self.received_addrs = None def get_received_addrs(self): with p2p_lock: return self.received_addrs def on_addr(self, message): self.received_addrs = [] for addr in message.addrs: self.received_addrs.append(addr.ip) def addr_received(self): return self.received_addrs is not None class AddrTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 # TODO When Backporting core#25096, please also includes changes from: # - core#25312 # - core#25333 def run_test(self): - self.log.info('Fill peer AddrMan with a lot of records') + self.log.info("Fill peer AddrMan with a lot of records") for i in range(10000): first_octet = i >> 8 second_octet = i % 256 a = f"{first_octet}.{second_octet}.1.1" self.nodes[0].addpeeraddress(a, 8333) # Need to make sure we hit MAX_ADDR_TO_SEND records in the addr # response later because only a fraction of all known addresses # can be cached and returned. 
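# Worked numbers (my aside, not part of the patch): a getaddr reply returns at
# most MAX_PCT_ADDR_TO_SEND percent of the known addresses, capped at
# MAX_ADDR_TO_SEND, so the cap is only guaranteed to be hit once AddrMan holds
# more than MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100), i.e.
# 1000 / 0.23 ~= 4348 records. The 10000 addresses added above clear that
# bound, which is exactly what the assert below verifies.
assert 10000 > MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100)  # 4347.8...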
assert len(self.nodes[0].getnodeaddresses(0)) > int( - MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100)) + MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100) + ) responses = [] - self.log.info('Send many addr requests within short time to receive') + self.log.info("Send many addr requests within short time to receive") N = 5 cur_mock_time = int(time.time()) for i in range(N): addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) addr_receiver.send_and_ping(msg_getaddr()) # Trigger response cur_mock_time += 5 * 60 self.nodes[0].setmocktime(cur_mock_time) addr_receiver.wait_until(addr_receiver.addr_received) responses.append(addr_receiver.get_received_addrs()) for response in responses[1:]: assert_equal(response, responses[0]) assert len(response) == MAX_ADDR_TO_SEND cur_mock_time += 3 * 24 * 60 * 60 self.nodes[0].setmocktime(cur_mock_time) - self.log.info('After time passed, see a new response to addr request') + self.log.info("After time passed, see a new response to addr request") last_addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) last_addr_receiver.send_and_ping(msg_getaddr()) # Trigger response cur_mock_time += 5 * 60 self.nodes[0].setmocktime(cur_mock_time) last_addr_receiver.wait_until(last_addr_receiver.addr_received) # new response is different - assert (set(responses[0]) != set( - last_addr_receiver.get_received_addrs())) + assert set(responses[0]) != set(last_addr_receiver.get_received_addrs()) -if __name__ == '__main__': +if __name__ == "__main__": AddrTest().main() diff --git a/test/functional/p2p_getdata.py b/test/functional/p2p_getdata.py index ead4df8aa..174c9178e 100755 --- a/test/functional/p2p_getdata.py +++ b/test/functional/p2p_getdata.py @@ -1,48 +1,48 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test GETDATA processing behavior""" from collections import defaultdict from test_framework.messages import CInv, msg_getdata from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework class P2PStoreBlock(P2PInterface): def __init__(self): super().__init__() self.blocks = defaultdict(int) def on_block(self, message): message.block.calc_sha256() self.blocks[message.block.sha256] += 1 class GetdataTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def run_test(self): p2p_block_store = self.nodes[0].add_p2p_connection(P2PStoreBlock()) self.log.info( - "test that an invalid GETDATA doesn't prevent processing of future messages") + "test that an invalid GETDATA doesn't prevent processing of future messages" + ) # Send invalid message and verify that node responds to later ping invalid_getdata = msg_getdata() invalid_getdata.inv.append(CInv(t=0, h=0)) # INV type 0 is invalid. 
p2p_block_store.send_and_ping(invalid_getdata) # Check getdata still works by fetching tip block best_block = int(self.nodes[0].getbestblockhash(), 16) good_getdata = msg_getdata() good_getdata.inv.append(CInv(t=2, h=best_block)) p2p_block_store.send_and_ping(good_getdata) - p2p_block_store.wait_until( - lambda: p2p_block_store.blocks[best_block] == 1) + p2p_block_store.wait_until(lambda: p2p_block_store.blocks[best_block] == 1) -if __name__ == '__main__': +if __name__ == "__main__": GetdataTest().main() diff --git a/test/functional/p2p_i2p_ports.py b/test/functional/p2p_i2p_ports.py index 54d42b119..4933a6ada 100755 --- a/test/functional/p2p_i2p_ports.py +++ b/test/functional/p2p_i2p_ports.py @@ -1,49 +1,51 @@ #!/usr/bin/env python3 # Copyright (c) 2021-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test ports handling for I2P hosts """ import re from test_framework.test_framework import BitcoinTestFramework class I2PPorts(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 # The test assumes that an I2P SAM proxy is not listening here. self.extra_args = [["-i2psam=127.0.0.1:60000"]] def run_test(self): node = self.nodes[0] self.log.info("Ensure we don't try to connect if port!=0") addr = "zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:8333" raised = False try: - with node.assert_debug_log( - expected_msgs=[f"Error connecting to {addr}"]): + with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}"]): node.addnode(node=addr, command="onetry") except AssertionError as e: raised = True if not re.search( - r"Expected messages .* does not partially match log", - str(e)): + r"Expected messages .* does not partially match log", str(e) + ): raise AssertionError( - f"Assertion raised as expected, but with an unexpected message: {str(e)}") + "Assertion raised as expected, but with an unexpected message:" + f" {str(e)}" + ) if not raised: raise AssertionError("Assertion should have been raised") self.log.info( - "Ensure we try to connect if port=0 and get an error due to missing I2P proxy") + "Ensure we try to connect if port=0 and get an error due to missing I2P" + " proxy" + ) addr = "h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:0" - with node.assert_debug_log( - expected_msgs=[f"Error connecting to {addr}"]): + with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}"]): node.addnode(node=addr, command="onetry") -if __name__ == '__main__': +if __name__ == "__main__": I2PPorts().main() diff --git a/test/functional/p2p_ibd_txrelay.py b/test/functional/p2p_ibd_txrelay.py index 93a683591..069f6e762 100755 --- a/test/functional/p2p_ibd_txrelay.py +++ b/test/functional/p2p_ibd_txrelay.py @@ -1,46 +1,51 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test fee filters during and after IBD.""" from decimal import Decimal from test_framework.messages import XEC from test_framework.test_framework import BitcoinTestFramework MAX_FEE_FILTER = Decimal(9170997) / XEC NORMAL_FEE_FILTER = Decimal(100) / XEC class P2PIBDTxRelayTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [ [f"-minrelaytxfee={NORMAL_FEE_FILTER}"], [f"-minrelaytxfee={NORMAL_FEE_FILTER}"], ] def run_test(self): - self.log.info( - "Check that nodes set minfilter to MAX_MONEY while still in IBD") + self.log.info("Check that nodes set minfilter to MAX_MONEY while still in IBD") for node in self.nodes: - assert node.getblockchaininfo()['initialblockdownload'] - self.wait_until(lambda: all(peer['minfeefilter'] == MAX_FEE_FILTER - for peer in node.getpeerinfo())) + assert node.getblockchaininfo()["initialblockdownload"] + self.wait_until( + lambda: all( + peer["minfeefilter"] == MAX_FEE_FILTER + for peer in node.getpeerinfo() + ) + ) # Come out of IBD by generating a block self.generate(self.nodes[0], 1) - self.log.info( - "Check that nodes reset minfilter after coming out of IBD") + self.log.info("Check that nodes reset minfilter after coming out of IBD") for node in self.nodes: - assert not node.getblockchaininfo()['initialblockdownload'] + assert not node.getblockchaininfo()["initialblockdownload"] self.wait_until( - lambda: all(peer['minfeefilter'] == NORMAL_FEE_FILTER - for peer in node.getpeerinfo())) + lambda: all( + peer["minfeefilter"] == NORMAL_FEE_FILTER + for peer in node.getpeerinfo() + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": P2PIBDTxRelayTest().main() diff --git a/test/functional/p2p_inv_download.py b/test/functional/p2p_inv_download.py index 48db7553e..3ccc44659 100755 --- a/test/functional/p2p_inv_download.py +++ b/test/functional/p2p_inv_download.py @@ -1,477 +1,502 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
""" Test inventory download behavior """ import functools import time from test_framework.address import ADDRESS_ECREG_UNSPENDABLE from test_framework.avatools import avalanche_proof_from_hex, gen_proof, wait_for_proof from test_framework.key import ECKey from test_framework.messages import ( MSG_AVA_PROOF, MSG_TX, MSG_TYPE_MASK, CInv, CTransaction, FromHex, msg_avaproof, msg_inv, msg_notfound, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, uint256_hex from test_framework.wallet_util import bytes_to_wif class TestP2PConn(P2PInterface): def __init__(self, inv_type): super().__init__() self.inv_type = inv_type self.getdata_count = 0 def on_getdata(self, message): for i in message.inv: if i.type & MSG_TYPE_MASK == self.inv_type: self.getdata_count += 1 class NetConstants: """Constants from net_processing""" - def __init__(self, - getdata_interval, - inbound_peer_delay, - overloaded_peer_delay, - max_getdata_in_flight, - max_peer_announcements, - bypass_request_limits_permission_flags, - ): + def __init__( + self, + getdata_interval, + inbound_peer_delay, + overloaded_peer_delay, + max_getdata_in_flight, + max_peer_announcements, + bypass_request_limits_permission_flags, + ): self.getdata_interval = getdata_interval self.inbound_peer_delay = inbound_peer_delay self.overloaded_peer_delay = overloaded_peer_delay self.max_getdata_in_flight = max_getdata_in_flight self.max_peer_announcements = max_peer_announcements self.max_getdata_inbound_wait = self.getdata_interval + self.inbound_peer_delay - self.bypass_request_limits_permission_flags = bypass_request_limits_permission_flags + self.bypass_request_limits_permission_flags = ( + bypass_request_limits_permission_flags + ) class TestContext: def __init__(self, inv_type, inv_name, constants): self.inv_type = inv_type self.inv_name = inv_name self.constants = constants def p2p_conn(self): return TestP2PConn(self.inv_type) PROOF_TEST_CONTEXT = TestContext( MSG_AVA_PROOF, "avalanche proof", NetConstants( getdata_interval=60, # seconds inbound_peer_delay=2, # seconds overloaded_peer_delay=2, # seconds max_getdata_in_flight=100, max_peer_announcements=5000, bypass_request_limits_permission_flags="bypass_proof_request_limits", ), ) TX_TEST_CONTEXT = TestContext( MSG_TX, "transaction", NetConstants( getdata_interval=60, # seconds inbound_peer_delay=2, # seconds overloaded_peer_delay=2, # seconds max_getdata_in_flight=100, max_peer_announcements=5000, bypass_request_limits_permission_flags="relay", ), ) # Python test constants NUM_INBOUND = 10 # Common network parameters UNCONDITIONAL_RELAY_DELAY = 2 * 60 def skip(context): def decorator(test): @functools.wraps(test) def wrapper(*args, **kwargs): # Assume the signature is test(self, context) unless context is # passed by name call_context = kwargs.get("context", args[1]) if call_context == context: return lambda *args, **kwargs: None return test(*args, **kwargs) + return wrapper + return decorator class InventoryDownloadTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 - self.extra_args = [['-avaproofstakeutxodustthreshold=1000000', - '-avaproofstakeutxoconfirmations=1', - '-avacooldown=0']] * self.num_nodes + self.extra_args = [ + [ + "-avaproofstakeutxodustthreshold=1000000", + "-avaproofstakeutxoconfirmations=1", + "-avacooldown=0", + ] + ] * self.num_nodes def test_data_requests(self, context): - self.log.info( - "Test that we request 
data from all our peers, eventually") + self.log.info("Test that we request data from all our peers, eventually") - invid = 0xdeadbeef + invid = 0xDEADBEEF self.log.info("Announce the invid from each incoming peer to node 0") msg = msg_inv([CInv(t=context.inv_type, h=invid)]) for p in self.nodes[0].p2ps: p.send_and_ping(msg) outstanding_peer_index = list(range(len(self.nodes[0].p2ps))) def getdata_found(peer_index): p = self.nodes[0].p2ps[peer_index] with p2p_lock: - return p.last_message.get( - "getdata") and p.last_message["getdata"].inv[-1].hash == invid + return ( + p.last_message.get("getdata") + and p.last_message["getdata"].inv[-1].hash == invid + ) node_0_mocktime = int(time.time()) while outstanding_peer_index: node_0_mocktime += context.constants.max_getdata_inbound_wait self.nodes[0].setmocktime(node_0_mocktime) - self.wait_until(lambda: any(getdata_found(i) - for i in outstanding_peer_index)) + self.wait_until( + lambda: any(getdata_found(i) for i in outstanding_peer_index) + ) for i in outstanding_peer_index: if getdata_found(i): outstanding_peer_index.remove(i) self.nodes[0].setmocktime(0) self.log.info("All outstanding peers received a getdata") @skip(PROOF_TEST_CONTEXT) def test_inv_tx(self, context): self.log.info("Generate a transaction on node 0") tx = self.nodes[0].createrawtransaction( - inputs=[{ - # coinbase - "txid": self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0], - "vout": 0 - }], + inputs=[ + { + # coinbase + "txid": self.nodes[0].getblock(self.nodes[0].getblockhash(1))["tx"][ + 0 + ], + "vout": 0, + } + ], outputs={ADDRESS_ECREG_UNSPENDABLE: 50000000 - 250.00}, ) tx = self.nodes[0].signrawtransactionwithkey( hexstring=tx, privkeys=[self.nodes[0].get_deterministic_priv_key().key], - )['hex'] + )["hex"] ctx = FromHex(CTransaction(), tx) txid = int(ctx.rehash(), 16) self.log.info( f"Announce the transaction to all nodes from all {NUM_INBOUND} incoming " - "peers, but never send it") + "peers, but never send it" + ) msg = msg_inv([CInv(t=context.inv_type, h=txid)]) for p in self.peers: p.send_and_ping(msg) self.log.info("Put the tx in node 0's mempool") self.nodes[0].sendrawtransaction(tx) # node1 is an inbound peer for node0, so the tx relay is delayed by a # duration calculated using a poisson's law with a 5s average time. # In order to make sure the inv is sent we move the time 2 minutes # forward, which has the added side effect that the tx can be # unconditionally requested. with self.nodes[1].assert_debug_log( - [f"got inv: tx {uint256_hex(txid)} new peer=0"]): - self.nodes[0].setmocktime( - int(time.time()) + UNCONDITIONAL_RELAY_DELAY) + [f"got inv: tx {uint256_hex(txid)} new peer=0"] + ): + self.nodes[0].setmocktime(int(time.time()) + UNCONDITIONAL_RELAY_DELAY) # Since node 1 is connected outbound to an honest peer (node 0), it # should get the tx within a timeout. 
# The timeout is the sum of # * the worst case until the tx is first requested from an inbound # peer, plus # * the first time it is re-requested from the outbound peer, plus # * 2 seconds to avoid races - assert self.nodes[1].getpeerinfo()[0]['inbound'] is False - max_delay = context.constants.inbound_peer_delay + \ - context.constants.getdata_interval + assert self.nodes[1].getpeerinfo()[0]["inbound"] is False + max_delay = ( + context.constants.inbound_peer_delay + context.constants.getdata_interval + ) margin = 2 self.log.info( - f"Tx should be received at node 1 after {max_delay + margin} seconds") + f"Tx should be received at node 1 after {max_delay + margin} seconds" + ) self.nodes[1].setmocktime(int(time.time()) + max_delay) self.sync_mempools(timeout=margin) def test_in_flight_max(self, context): max_getdata_in_flight = context.constants.max_getdata_in_flight - max_inbound_delay = context.constants.inbound_peer_delay + \ - context.constants.overloaded_peer_delay + max_inbound_delay = ( + context.constants.inbound_peer_delay + + context.constants.overloaded_peer_delay + ) self.log.info( f"Test that we don't load peers with more than {max_getdata_in_flight} " - "getdata requests immediately") + "getdata requests immediately" + ) invids = list(range(max_getdata_in_flight + 2)) p = self.nodes[0].p2ps[0] with p2p_lock: p.getdata_count = 0 mock_time = int(time.time() + 1) self.nodes[0].setmocktime(mock_time) for i in range(max_getdata_in_flight): p.send_message(msg_inv([CInv(t=context.inv_type, h=invids[i])])) p.sync_with_ping() mock_time += context.constants.inbound_peer_delay self.nodes[0].setmocktime(mock_time) p.wait_until(lambda: p.getdata_count >= max_getdata_in_flight) for i in range(max_getdata_in_flight, len(invids)): p.send_message(msg_inv([CInv(t=context.inv_type, h=invids[i])])) p.sync_with_ping() self.log.info( f"No more than {max_getdata_in_flight} requests should be seen within " - f"{max_inbound_delay - 1} seconds after announcement") - self.nodes[0].setmocktime( - mock_time + - max_inbound_delay - 1) + f"{max_inbound_delay - 1} seconds after announcement" + ) + self.nodes[0].setmocktime(mock_time + max_inbound_delay - 1) p.sync_with_ping() with p2p_lock: assert_equal(p.getdata_count, max_getdata_in_flight) self.log.info( f"If we wait {max_inbound_delay} seconds after announcement, we should " - f"eventually get more requests") - self.nodes[0].setmocktime( - mock_time + - max_inbound_delay) + "eventually get more requests" + ) + self.nodes[0].setmocktime(mock_time + max_inbound_delay) p.wait_until(lambda: p.getdata_count == len(invids)) def test_expiry_fallback(self, context): - self.log.info( - 'Check that expiry will select another peer for download') + self.log.info("Check that expiry will select another peer for download") peer1 = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer2 = self.nodes[0].add_p2p_connection(context.p2p_conn()) for p in [peer1, peer2]: - p.send_message(msg_inv([CInv(t=context.inv_type, h=0xffaa)])) + p.send_message(msg_inv([CInv(t=context.inv_type, h=0xFFAA)])) # One of the peers is asked for the data - peer2.wait_until( - lambda: sum( - p.getdata_count for p in [ - peer1, peer2]) == 1) + peer2.wait_until(lambda: sum(p.getdata_count for p in [peer1, peer2]) == 1) with p2p_lock: peer_expiry, peer_fallback = ( - peer1, peer2) if peer1.getdata_count == 1 else ( - peer2, peer1) + (peer1, peer2) if peer1.getdata_count == 1 else (peer2, peer1) + ) assert_equal(peer_fallback.getdata_count, 0) # Wait for request to peer_expiry to expire 
self.nodes[0].setmocktime( - int(time.time()) + context.constants.getdata_interval + 1) - peer_fallback.wait_until( - lambda: peer_fallback.getdata_count >= 1) + int(time.time()) + context.constants.getdata_interval + 1 + ) + peer_fallback.wait_until(lambda: peer_fallback.getdata_count >= 1) with p2p_lock: assert_equal(peer_fallback.getdata_count, 1) # reset mocktime self.restart_node(0) def test_disconnect_fallback(self, context): - self.log.info( - 'Check that disconnect will select another peer for download') + self.log.info("Check that disconnect will select another peer for download") peer1 = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer2 = self.nodes[0].add_p2p_connection(context.p2p_conn()) for p in [peer1, peer2]: - p.send_message(msg_inv([CInv(t=context.inv_type, h=0xffbb)])) + p.send_message(msg_inv([CInv(t=context.inv_type, h=0xFFBB)])) # One of the peers is asked for the data - peer2.wait_until( - lambda: sum( - p.getdata_count for p in [ - peer1, peer2]) == 1) + peer2.wait_until(lambda: sum(p.getdata_count for p in [peer1, peer2]) == 1) with p2p_lock: peer_disconnect, peer_fallback = ( - peer1, peer2) if peer1.getdata_count == 1 else ( - peer2, peer1) + (peer1, peer2) if peer1.getdata_count == 1 else (peer2, peer1) + ) assert_equal(peer_fallback.getdata_count, 0) peer_disconnect.peer_disconnect() peer_disconnect.wait_for_disconnect() - peer_fallback.wait_until( - lambda: peer_fallback.getdata_count >= 1) + peer_fallback.wait_until(lambda: peer_fallback.getdata_count >= 1) with p2p_lock: assert_equal(peer_fallback.getdata_count, 1) def test_notfound_fallback(self, context): self.log.info( - 'Check that notfounds will select another peer for download immediately') + "Check that notfounds will select another peer for download immediately" + ) peer1 = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer2 = self.nodes[0].add_p2p_connection(context.p2p_conn()) for p in [peer1, peer2]: - p.send_message(msg_inv([CInv(t=context.inv_type, h=0xffdd)])) + p.send_message(msg_inv([CInv(t=context.inv_type, h=0xFFDD)])) # One of the peers is asked for the data - peer2.wait_until( - lambda: sum( - p.getdata_count for p in [ - peer1, peer2]) == 1) + peer2.wait_until(lambda: sum(p.getdata_count for p in [peer1, peer2]) == 1) with p2p_lock: peer_notfound, peer_fallback = ( - peer1, peer2) if peer1.getdata_count == 1 else ( - peer2, peer1) + (peer1, peer2) if peer1.getdata_count == 1 else (peer2, peer1) + ) assert_equal(peer_fallback.getdata_count, 0) # Send notfound, so that fallback peer is selected - peer_notfound.send_and_ping(msg_notfound( - vec=[CInv(context.inv_type, 0xffdd)])) - peer_fallback.wait_until( - lambda: peer_fallback.getdata_count >= 1) + peer_notfound.send_and_ping(msg_notfound(vec=[CInv(context.inv_type, 0xFFDD)])) + peer_fallback.wait_until(lambda: peer_fallback.getdata_count >= 1) with p2p_lock: assert_equal(peer_fallback.getdata_count, 1) def test_preferred_inv(self, context): - self.log.info( - 'Check that invs from preferred peers are downloaded immediately') + self.log.info("Check that invs from preferred peers are downloaded immediately") self.restart_node( - 0, - extra_args=self.extra_args[0] + - ['-whitelist=noban@127.0.0.1']) + 0, extra_args=self.extra_args[0] + ["-whitelist=noban@127.0.0.1"] + ) peer = self.nodes[0].add_p2p_connection(context.p2p_conn()) - peer.send_message(msg_inv([CInv(t=context.inv_type, h=0xff00ff00)])) + peer.send_message(msg_inv([CInv(t=context.inv_type, h=0xFF00FF00)])) peer.wait_until(lambda: peer.getdata_count >= 1) with 
p2p_lock: assert_equal(peer.getdata_count, 1) def test_large_inv_batch(self, context): max_peer_announcements = context.constants.max_peer_announcements net_permissions = context.constants.bypass_request_limits_permission_flags self.log.info( - f'Test how large inv batches are handled with {net_permissions} permission') + f"Test how large inv batches are handled with {net_permissions} permission" + ) self.restart_node( 0, - extra_args=self.extra_args[0] + - [f'-whitelist={net_permissions}@127.0.0.1']) + extra_args=self.extra_args[0] + [f"-whitelist={net_permissions}@127.0.0.1"], + ) peer = self.nodes[0].add_p2p_connection(context.p2p_conn()) - peer.send_message(msg_inv([CInv(t=context.inv_type, h=invid) - for invid in range(max_peer_announcements + 1)])) - peer.wait_until(lambda: peer.getdata_count == - max_peer_announcements + 1) + peer.send_message( + msg_inv( + [ + CInv(t=context.inv_type, h=invid) + for invid in range(max_peer_announcements + 1) + ] + ) + ) + peer.wait_until(lambda: peer.getdata_count == max_peer_announcements + 1) self.log.info( - f'Test how large inv batches are handled without {net_permissions} permission') + "Test how large inv batches are handled without" + f" {net_permissions} permission" + ) self.restart_node(0) peer = self.nodes[0].add_p2p_connection(context.p2p_conn()) - peer.send_message(msg_inv([CInv(t=context.inv_type, h=invid) - for invid in range(max_peer_announcements + 1)])) - peer.wait_until(lambda: peer.getdata_count == - max_peer_announcements) + peer.send_message( + msg_inv( + [ + CInv(t=context.inv_type, h=invid) + for invid in range(max_peer_announcements + 1) + ] + ) + ) + peer.wait_until(lambda: peer.getdata_count == max_peer_announcements) peer.sync_with_ping() with p2p_lock: assert_equal(peer.getdata_count, max_peer_announcements) def test_spurious_notfound(self, context): - self.log.info('Check that spurious notfound is ignored') + self.log.info("Check that spurious notfound is ignored") self.nodes[0].p2ps[0].send_message( - msg_notfound(vec=[CInv(context.inv_type, 1)])) + msg_notfound(vec=[CInv(context.inv_type, 1)]) + ) @skip(TX_TEST_CONTEXT) def test_immature_download(self, context): node = self.nodes[0] # Build a proof with immature utxos privkey, immature = gen_proof(self, node) proofid_hex = uint256_hex(immature.proofid) - self.restart_node(0, extra_args=self.extra_args[0] + [ - "-avaproofstakeutxoconfirmations=3", - f"-avaproof={immature.serialize().hex()}", - f"-avamasterkey={bytes_to_wif(privkey.get_bytes())}", - ]) + self.restart_node( + 0, + extra_args=self.extra_args[0] + + [ + "-avaproofstakeutxoconfirmations=3", + f"-avaproof={immature.serialize().hex()}", + f"-avamasterkey={bytes_to_wif(privkey.get_bytes())}", + ], + ) # Add an inbound so the node proof can be registered and advertised node.add_p2p_connection(P2PInterface()) self.generate(node, 1, sync_fun=self.no_op) wait_for_proof(node, proofid_hex, expect_status="immature") peer = node.add_p2p_connection(context.p2p_conn()) - peer.send_message( - msg_inv([CInv(t=context.inv_type, h=immature.proofid)])) + peer.send_message(msg_inv([CInv(t=context.inv_type, h=immature.proofid)])) # Give enough time for the node to eventually request the proof. 
- node.setmocktime(int(time.time()) + - context.constants.getdata_interval + 1) + node.setmocktime(int(time.time()) + context.constants.getdata_interval + 1) peer.sync_with_ping() assert_equal(peer.getdata_count, 0) @skip(TX_TEST_CONTEXT) def test_request_invalid_once(self, context): node = self.nodes[0] privkey = ECKey() privkey.generate() # Build an invalid proof (no stake) no_stake_hex = node.buildavalancheproof( 42, 2000000000, bytes_to_wif(privkey.get_bytes()), [] ) no_stake = avalanche_proof_from_hex(no_stake_hex) - assert_raises_rpc_error(-8, - "The proof is invalid: no-stake", - node.verifyavalancheproof, - no_stake_hex) + assert_raises_rpc_error( + -8, + "The proof is invalid: no-stake", + node.verifyavalancheproof, + no_stake_hex, + ) # Send the proof msg = msg_avaproof() msg.proof = no_stake node.p2ps[0].send_message(msg) # Check we get banned node.p2ps[0].wait_for_disconnect() # Now that the node knows the proof is invalid, it should not be # requested anymore node.p2ps[1].send_message( - msg_inv([CInv(t=context.inv_type, h=no_stake.proofid)])) + msg_inv([CInv(t=context.inv_type, h=no_stake.proofid)]) + ) # Give enough time for the node to eventually request the proof - node.setmocktime(int(time.time()) + - context.constants.getdata_interval + 1) + node.setmocktime(int(time.time()) + context.constants.getdata_interval + 1) node.p2ps[1].sync_with_ping() assert all(p.getdata_count == 0 for p in node.p2ps[1:]) def run_test(self): for context in [TX_TEST_CONTEXT, PROOF_TEST_CONTEXT]: - self.log.info( - f"Starting tests using {context.inv_name} inventory type") + self.log.info(f"Starting tests using {context.inv_name} inventory type") # Run tests without mocktime that only need one peer-connection first, # to avoid restarting the nodes self.test_expiry_fallback(context) self.test_disconnect_fallback(context) self.test_notfound_fallback(context) self.test_preferred_inv(context) self.test_large_inv_batch(context) self.test_spurious_notfound(context) # Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when # the next trickle relay event happens. - for test in [self.test_in_flight_max, self.test_inv_tx, - self.test_data_requests, self.test_immature_download, self.test_request_invalid_once]: + for test in [ + self.test_in_flight_max, + self.test_inv_tx, + self.test_data_requests, + self.test_immature_download, + self.test_request_invalid_once, + ]: self.stop_nodes() self.start_nodes() self.connect_nodes(1, 0) # Setup the p2p connections self.peers = [] for node in self.nodes: for _ in range(NUM_INBOUND): - self.peers.append( - node.add_p2p_connection( - context.p2p_conn())) + self.peers.append(node.add_p2p_connection(context.p2p_conn())) self.log.info( - f"Nodes are setup with {NUM_INBOUND} incoming connections each") + f"Nodes are setup with {NUM_INBOUND} incoming connections each" + ) test(context) -if __name__ == '__main__': +if __name__ == "__main__": InventoryDownloadTest().main() diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py index 691690ea0..3f752f16e 100755 --- a/test/functional/p2p_invalid_block.py +++ b/test/functional/p2p_invalid_block.py @@ -1,185 +1,185 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid blocks. 
In this test we connect to one node over p2p, and test block requests: 1) Valid blocks should be requested and become chain tip. 2) Invalid block with duplicated transaction should be re-requested. 3) Invalid block with bad coinbase value should be rejected and not re-requested. 4) Invalid block due to future timestamp is later accepted when that timestamp becomes valid. """ import copy import time from test_framework.blocktools import ( MAX_FUTURE_BLOCK_TIME, create_block, create_coinbase, create_tx_with_script, make_conform_to_ctor, ) from test_framework.messages import COIN from test_framework.p2p import P2PDataStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class InvalidBlockRequestTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [["-whitelist=noban@127.0.0.1"]] def run_test(self): # Add p2p connection to node0 node = self.nodes[0] # convenience reference to the node peer = node.add_p2p_connection(P2PDataStore()) best_block = node.getblock(node.getbestblockhash()) tip = int(node.getbestblockhash(), 16) height = best_block["height"] + 1 block_time = best_block["time"] + 1 self.log.info("Create a new block with an anyone-can-spend coinbase") height = 1 block = create_block(tip, create_coinbase(height), block_time) block.solve() # Save the coinbase for later block1 = block tip = block.sha256 peer.send_blocks_and_test([block1], node, success=True) self.log.info("Mature the block.") - self.generatetoaddress( - node, 100, node.get_deterministic_priv_key().address) + self.generatetoaddress(node, 100, node.get_deterministic_priv_key().address) best_block = node.getblock(node.getbestblockhash()) tip = int(node.getbestblockhash(), 16) height = best_block["height"] + 1 block_time = best_block["time"] + 1 # Use merkle-root malleability to generate an invalid block with # same blockheader (CVE-2012-2459). # Manufacture a block with 3 transactions (coinbase, spend of prior # coinbase, spend of that spend). Duplicate the 3rd transaction to # leave merkle root and blockheader unchanged but invalidate the block. # For more information on merkle-root malleability see # src/consensus/merkle.cpp. 
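# Editor's illustration (not part of the patch): a toy reconstruction of the malleability described in the comment above. With an odd number of leaves the last hash is paired with itself, so the leaf lists [a, b, c] and [a, b, c, c] yield the same root. Only hashlib is assumed; the leaves are arbitrary placeholders, not real transactions.
import hashlib

def dsha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def toy_merkle_root(leaves):
    level = [dsha256(leaf) for leaf in leaves]
    while len(level) > 1:
        if len(level) % 2:
            # duplicate the odd entry, as Bitcoin's merkle computation does
            level.append(level[-1])
        level = [dsha256(level[i] + level[i + 1]) for i in range(0, len(level), 2)]
    return level[0]

assert toy_merkle_root([b"a", b"b", b"c"]) == toy_merkle_root([b"a", b"b", b"c", b"c"])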
self.log.info("Test merkle root malleability.") block2 = create_block(tip, create_coinbase(height), block_time) block_time += 1 # b'0x51' is OP_TRUE - tx1 = create_tx_with_script( - block1.vtx[0], 0, script_sig=b'', amount=50 * COIN) - tx2 = create_tx_with_script( - tx1, 0, script_sig=b'\x51', amount=50 * COIN) + tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b"", amount=50 * COIN) + tx2 = create_tx_with_script(tx1, 0, script_sig=b"\x51", amount=50 * COIN) block2.vtx.extend([tx1, tx2]) - block2.vtx = [block2.vtx[0]] + \ - sorted(block2.vtx[1:], key=lambda tx: tx.get_id()) + block2.vtx = [block2.vtx[0]] + sorted( + block2.vtx[1:], key=lambda tx: tx.get_id() + ) block2.hashMerkleRoot = block2.calc_merkle_root() block2.rehash() block2.solve() orig_hash = block2.sha256 block2_orig = copy.deepcopy(block2) # Mutate block 2 block2.vtx.append(block2.vtx[2]) assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root()) assert_equal(orig_hash, block2.rehash()) assert block2_orig.vtx != block2.vtx peer.send_blocks_and_test( - [block2], node, success=False, reject_reason='bad-txns-duplicate') + [block2], node, success=False, reject_reason="bad-txns-duplicate" + ) # Check transactions for duplicate inputs (CVE-2018-17144) self.log.info("Test duplicate input block.") block2_dup = copy.deepcopy(block2_orig) block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0]) block2_dup.vtx[2].rehash() make_conform_to_ctor(block2_dup) block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root() block2_dup.rehash() block2_dup.solve() peer.send_blocks_and_test( - [block2_dup], node, success=False, - reject_reason='bad-txns-inputs-duplicate') + [block2_dup], node, success=False, reject_reason="bad-txns-inputs-duplicate" + ) self.log.info("Test very broken block.") block3 = create_block(tip, create_coinbase(height), block_time) block_time += 1 block3.vtx[0].vout[0].nValue = 100 * COIN # Too high! block3.vtx[0].sha256 = None block3.vtx[0].calc_sha256() block3.hashMerkleRoot = block3.calc_merkle_root() block3.rehash() block3.solve() peer.send_blocks_and_test( - [block3], node, success=False, reject_reason='bad-cb-amount') + [block3], node, success=False, reject_reason="bad-cb-amount" + ) # Complete testing of CVE-2012-2459 by sending the original block. # It should be accepted even though it has the same hash as the mutated # one. - self.log.info("Test accepting original block after rejecting its" - " mutated version.") - peer.send_blocks_and_test([block2_orig], node, success=True, - timeout=5) + self.log.info( + "Test accepting original block after rejecting its mutated version." + ) + peer.send_blocks_and_test([block2_orig], node, success=True, timeout=5) # Update tip info height += 1 block_time += 1 tip = int(block2_orig.hash, 16) # Complete testing of CVE-2018-17144, by checking for the inflation bug. # Create a block that spends the output of a tx in a previous block. 
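# Editor's illustration (not part of the patch): the inflation risk behind CVE-2018-17144 in miniature. If duplicate inputs were accepted, the same 50 * COIN outpoint could be counted twice on the input side, backing up to 100 * COIN of outputs with only 50 * COIN of real funds. COIN is the constant already imported in this file; the values mirror the coinbase being spent above.
input_value = 50 * COIN
claimed_inputs = [input_value, input_value]  # the same outpoint listed twice
assert sum(claimed_inputs) == 2 * input_value  # value that never existed, if this were allowed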
block4 = create_block(tip, create_coinbase(height), block_time) - tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', - amount=50 * COIN) + tx3 = create_tx_with_script(tx2, 0, script_sig=b"\x51", amount=50 * COIN) # Duplicates input tx3.vin.append(tx3.vin[0]) tx3.rehash() block4.vtx.append(tx3) make_conform_to_ctor(block4) block4.hashMerkleRoot = block4.calc_merkle_root() block4.rehash() block4.solve() self.log.info("Test inflation by duplicating input") - peer.send_blocks_and_test([block4], node, success=False, - reject_reason='bad-txns-inputs-duplicate') + peer.send_blocks_and_test( + [block4], node, success=False, reject_reason="bad-txns-inputs-duplicate" + ) self.log.info( - "Test accepting identical block after rejecting it due to a future timestamp.") + "Test accepting identical block after rejecting it due to a future" + " timestamp." + ) t = int(time.time()) node.setmocktime(t) # Set block time +1 second past max future validity - block = create_block(tip, create_coinbase( - height), t + MAX_FUTURE_BLOCK_TIME + 1) + block = create_block( + tip, create_coinbase(height), t + MAX_FUTURE_BLOCK_TIME + 1 + ) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Need force_send because the block will get rejected without a getdata # otherwise peer.send_blocks_and_test( - [block], - node, - force_send=True, - success=False, - reject_reason='time-too-new') + [block], node, force_send=True, success=False, reject_reason="time-too-new" + ) node.setmocktime(t + 1) peer.send_blocks_and_test([block], node, success=True) -if __name__ == '__main__': +if __name__ == "__main__": InvalidBlockRequestTest().main() diff --git a/test/functional/p2p_invalid_locator.py b/test/functional/p2p_invalid_locator.py index b344752e2..e31b7aece 100755 --- a/test/functional/p2p_invalid_locator.py +++ b/test/functional/p2p_invalid_locator.py @@ -1,50 +1,54 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid locators. 
""" from test_framework.messages import MAX_LOCATOR_SZ, msg_getblocks, msg_getheaders from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework class InvalidLocatorTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def run_test(self): # Convenience reference to the node node = self.nodes[0] # Get node out of IBD - self.generatetoaddress( - node, 1, node.get_deterministic_priv_key().address) + self.generatetoaddress(node, 1, node.get_deterministic_priv_key().address) - self.log.info('Test max locator size') + self.log.info("Test max locator size") block_count = node.getblockcount() for msg in [msg_getheaders(), msg_getblocks()]: self.log.info( - f'Wait for disconnect when sending {MAX_LOCATOR_SZ + 1} hashes in ' - f'locator') + f"Wait for disconnect when sending {MAX_LOCATOR_SZ + 1} hashes in " + "locator" + ) exceed_max_peer = node.add_p2p_connection(P2PInterface()) - msg.locator.vHave = [int(node.getblockhash( - i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)] + msg.locator.vHave = [ + int(node.getblockhash(i - 1), 16) + for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1) + ] exceed_max_peer.send_message(msg) exceed_max_peer.wait_for_disconnect() self.log.info( - f'Wait for response when sending {MAX_LOCATOR_SZ} hashes in locator') + f"Wait for response when sending {MAX_LOCATOR_SZ} hashes in locator" + ) within_max_peer = node.add_p2p_connection(P2PInterface()) - msg.locator.vHave = [int(node.getblockhash( - i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1)] + msg.locator.vHave = [ + int(node.getblockhash(i - 1), 16) + for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1) + ] within_max_peer.send_message(msg) if isinstance(msg, msg_getheaders): within_max_peer.wait_for_header(node.getbestblockhash()) else: - within_max_peer.wait_for_block( - int(node.getbestblockhash(), 16)) + within_max_peer.wait_for_block(int(node.getbestblockhash(), 16)) -if __name__ == '__main__': +if __name__ == "__main__": InvalidLocatorTest().main() diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 1ba17e58f..8357a1d9c 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -1,297 +1,323 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid network messages.""" import struct import time from test_framework.messages import ( MAX_HEADERS_RESULTS, MAX_INV_SIZE, MAX_PROTOCOL_MESSAGE_LENGTH, MSG_TX, CBlockHeader, CInv, msg_getdata, msg_headers, msg_inv, msg_ping, ser_string, ) from test_framework.p2p import P2PDataStore, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal # Account for the 5-byte length prefix VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5 class msg_unrecognized: """Nonsensical message. 
Modeled after similar types in test_framework.messages.""" - msgtype = b'badmsg' + msgtype = b"badmsg" def __init__(self, *, str_data): - self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data + self.str_data = ( + str_data.encode() if not isinstance(str_data, bytes) else str_data + ) def serialize(self): return ser_string(self.str_data) def __repr__(self): return f"{self.msgtype}(data={self.str_data})" class SenderOfAddrV2(P2PInterface): def wait_for_sendaddrv2(self): - self.wait_until(lambda: 'sendaddrv2' in self.last_message) + self.wait_until(lambda: "sendaddrv2" in self.last_message) class InvalidMessagesTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [["-whitelist=addr@127.0.0.1"]] def run_test(self): self.test_buffer() self.test_magic_bytes() self.test_checksum() self.test_size() self.test_msgtype() self.test_addrv2_empty() self.test_addrv2_no_addresses() self.test_addrv2_too_long_address() self.test_addrv2_unrecognized_network() self.test_oversized_inv_msg() self.test_oversized_getdata_msg() self.test_oversized_headers_msg() self.test_resource_exhaustion() def test_buffer(self): - self.log.info( - "Test message with header split across two buffers is received") + self.log.info("Test message with header split across two buffers is received") conn = self.nodes[0].add_p2p_connection(P2PDataStore()) # Create valid message msg = conn.build_message(msg_ping(nonce=12345)) # Chosen at an arbitrary position within the header cut_pos = 12 # Send message in two pieces - before = self.nodes[0].getnettotals()['totalbytesrecv'] + before = self.nodes[0].getnettotals()["totalbytesrecv"] conn.send_raw_message(msg[:cut_pos]) # Wait until node has processed the first half of the message self.wait_until( - lambda: self.nodes[0].getnettotals()['totalbytesrecv'] != before) - middle = self.nodes[0].getnettotals()['totalbytesrecv'] + lambda: self.nodes[0].getnettotals()["totalbytesrecv"] != before + ) + middle = self.nodes[0].getnettotals()["totalbytesrecv"] # If this assert fails, we've hit an unlikely race # where the test framework sent a message in between the two halves assert_equal(middle, before + cut_pos) conn.send_raw_message(msg[cut_pos:]) conn.sync_with_ping(timeout=1) self.nodes[0].disconnect_p2ps() def test_magic_bytes(self): self.log.info("Test message with invalid magic bytes disconnects peer") conn = self.nodes[0].add_p2p_connection(P2PDataStore()) - with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART badmsg']): + with self.nodes[0].assert_debug_log( + ["PROCESSMESSAGE: INVALID MESSAGESTART badmsg"] + ): msg = conn.build_message(msg_unrecognized(str_data="d")) # modify magic bytes - msg = b'\xff' * 4 + msg[4:] + msg = b"\xff" * 4 + msg[4:] conn.send_raw_message(msg) conn.wait_for_disconnect(timeout=1) self.nodes[0].disconnect_p2ps() def test_checksum(self): self.log.info("Test message with invalid checksum logs an error") conn = self.nodes[0].add_p2p_connection(P2PDataStore()) - with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']): + with self.nodes[0].assert_debug_log( + ["CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff"] + ): msg = conn.build_message(msg_unrecognized(str_data="d")) # Checksum is after start bytes (4B), message type (12B), len (4B) cut_len = 4 + 12 + 4 # modify checksum - msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:] + msg = msg[:cut_len] + b"\xff" * 4 + msg[cut_len + 4 :] 
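# Editor's illustration (not part of the patch): why cut_len lands on the checksum. The P2P message header built by build_message() is magic (4 bytes), command (12 bytes, zero-padded), payload length (4 bytes, little-endian), then checksum (4 bytes), so the checksum field starts at byte 4 + 12 + 4 = 20. The checksum is the first four bytes of a double SHA256 of the payload, which for this two-byte "badmsg" payload can be recomputed with nothing but hashlib and the ser_string helper already imported in this file:
import hashlib
example_payload = ser_string(b"d")  # compact-size length prefix + b"d" -> 2 bytes on the wire
example_checksum = hashlib.sha256(hashlib.sha256(example_payload).digest()).digest()[:4]
# Overwriting bytes [20:24] with b"\xff" * 4 therefore cannot match the value the
# node recomputes from the payload, producing the CHECKSUM ERROR line asserted above.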
conn.send_raw_message(msg) conn.wait_for_disconnect() self.nodes[0].disconnect_p2ps() def test_size(self): self.log.info("Test message with oversized payload disconnects peer") conn = self.nodes[0].add_p2p_connection(P2PDataStore()) - with self.nodes[0].assert_debug_log(['']): + with self.nodes[0].assert_debug_log([""]): msg = msg_unrecognized(str_data="d" * (VALID_DATA_LIMIT + 1)) msg = conn.build_message(msg) conn.send_raw_message(msg) conn.wait_for_disconnect(timeout=1) self.nodes[0].disconnect_p2ps() def test_msgtype(self): self.log.info("Test message with invalid message type logs an error") conn = self.nodes[0].add_p2p_connection(P2PDataStore()) - with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']): + with self.nodes[0].assert_debug_log(["PROCESSMESSAGE: ERRORS IN HEADER"]): msg = msg_unrecognized(str_data="d") - msg.msgtype = b'\xff' * 12 + msg.msgtype = b"\xff" * 12 msg = conn.build_message(msg) # Modify msgtype - msg = msg[:7] + b'\x00' + msg[7 + 1:] + msg = msg[:7] + b"\x00" + msg[7 + 1 :] conn.send_raw_message(msg) conn.sync_with_ping(timeout=1) # Check that traffic is accounted for (24 bytes header + 2 bytes # payload) - assert_equal( - self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26) + assert_equal(self.nodes[0].getpeerinfo()[0]["bytesrecv_per_msg"]["*other*"], 26) self.nodes[0].disconnect_p2ps() def test_addrv2(self, label, required_log_messages, raw_addrv2): node = self.nodes[0] conn = node.add_p2p_connection(SenderOfAddrV2()) # Make sure bitcoind signals support for ADDRv2, otherwise this test # will bombard an old node with messages it does not recognize which # will produce unexpected results. conn.wait_for_sendaddrv2() self.log.info(f"Test addrv2: {label}") - msg = msg_unrecognized(str_data=b'') - msg.msgtype = b'addrv2' + msg = msg_unrecognized(str_data=b"") + msg.msgtype = b"addrv2" with node.assert_debug_log(required_log_messages): # override serialize() which would include the length of the data msg.serialize = lambda: raw_addrv2 conn.send_raw_message(conn.build_message(msg)) conn.sync_with_ping() node.disconnect_p2ps() def test_addrv2_empty(self): - self.test_addrv2('empty', - [ - 'received: addrv2 (0 bytes)', - 'ProcessMessages(addrv2, 0 bytes): Exception', - 'end of data', - ], - b'') + self.test_addrv2( + "empty", + [ + "received: addrv2 (0 bytes)", + "ProcessMessages(addrv2, 0 bytes): Exception", + "end of data", + ], + b"", + ) def test_addrv2_no_addresses(self): - self.test_addrv2('no addresses', - [ - 'received: addrv2 (1 bytes)', - ], - bytes.fromhex('00')) + self.test_addrv2( + "no addresses", + [ + "received: addrv2 (1 bytes)", + ], + bytes.fromhex("00"), + ) def test_addrv2_too_long_address(self): - self.test_addrv2('too long address', - [ - 'received: addrv2 (525 bytes)', - 'ProcessMessages(addrv2, 525 bytes): Exception', - 'Address too long: 513 > 512', - ], - bytes.fromhex( - # number of entries - '01' - # time, Fri Jan 9 02:54:25 UTC 2009 - '61bc6649' - # service flags, COMPACTSIZE(NODE_NONE) - '00' - # network type (IPv4) - '01' - # address length (COMPACTSIZE(513)) - 'fd0102' - # address - f'{"ab" * 513}' - # port - '208d')) + self.test_addrv2( + "too long address", + [ + "received: addrv2 (525 bytes)", + "ProcessMessages(addrv2, 525 bytes): Exception", + "Address too long: 513 > 512", + ], + bytes.fromhex( + # number of entries + "01" + # time, Fri Jan 9 02:54:25 UTC 2009 + "61bc6649" + # service flags, COMPACTSIZE(NODE_NONE) + "00" + # network type (IPv4) + "01" + # address length (COMPACTSIZE(513)) + 
"fd0102" + # address + f'{"ab" * 513}' + # port + "208d" + ), + ) def test_addrv2_unrecognized_network(self): - now_hex = struct.pack('= TIME_SIZE + \ - LENGTH_SIZE + MSGTYPE_SIZE + assert os.fstat(f_in.fileno()).st_size >= TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE while True: tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) if not tmp_header_raw: break tmp_header = BytesIO(tmp_header_raw) int.from_bytes(tmp_header.read(TIME_SIZE), "little") raw_msgtype = tmp_header.read(MSGTYPE_SIZE) - msgtype = raw_msgtype.split(b'\x00', 1)[0] - remainder = raw_msgtype.split(b'\x00', 1)[1] + msgtype = raw_msgtype.split(b"\x00", 1)[0] + remainder = raw_msgtype.split(b"\x00", 1)[1] assert len(msgtype) > 0 assert msgtype in MESSAGEMAP assert len(remainder) == 0 or not remainder.decode().isprintable() length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") data = f_in.read(length) assert_equal(len(data), length) class MessageCaptureTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [["-capturemessages"]] self.setup_clean_chain = True def run_test(self): - capturedir = os.path.join( - self.nodes[0].datadir, - "regtest/message_capture") + capturedir = os.path.join(self.nodes[0].datadir, "regtest/message_capture") # Connect a node so that the handshake occurs self.nodes[0].add_p2p_connection(P2PDataStore()) self.nodes[0].disconnect_p2ps() recv_file = glob.glob(os.path.join(capturedir, "*/msgs_recv.dat"))[0] mini_parser(recv_file) sent_file = glob.glob(os.path.join(capturedir, "*/msgs_sent.dat"))[0] mini_parser(sent_file) -if __name__ == '__main__': +if __name__ == "__main__": MessageCaptureTest().main() diff --git a/test/functional/p2p_nobloomfilter_messages.py b/test/functional/p2p_nobloomfilter_messages.py index ebed6c6ce..b83185f4b 100755 --- a/test/functional/p2p_nobloomfilter_messages.py +++ b/test/functional/p2p_nobloomfilter_messages.py @@ -1,57 +1,55 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test invalid p2p messages for nodes with bloom filters disabled. Test that, when bloom filters are not enabled, peers are disconnected if: 1. They send a p2p mempool message 2. They send a p2p filterload message 3. They send a p2p filteradd message 4. 
They send a p2p filterclear message """ from test_framework.messages import ( msg_filteradd, msg_filterclear, msg_filterload, msg_mempool, ) from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class P2PNoBloomFilterMessages(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-peerbloomfilters=0"]] def test_message_causes_disconnect(self, message): """Add a p2p connection that sends a message and check that it disconnects.""" peer = self.nodes[0].add_p2p_connection(P2PInterface()) peer.send_message(message) peer.wait_for_disconnect() assert_equal(self.nodes[0].getconnectioncount(), 0) def run_test(self): - self.log.info( - "Test that peer is disconnected if it sends mempool message") + self.log.info("Test that peer is disconnected if it sends mempool message") self.test_message_causes_disconnect(msg_mempool()) - self.log.info( - "Test that peer is disconnected if it sends filterload message") + self.log.info("Test that peer is disconnected if it sends filterload message") self.test_message_causes_disconnect(msg_filterload()) - self.log.info( - "Test that peer is disconnected if it sends filteradd message") - self.test_message_causes_disconnect(msg_filteradd(data=b'\xcc')) + self.log.info("Test that peer is disconnected if it sends filteradd message") + self.test_message_causes_disconnect(msg_filteradd(data=b"\xcc")) self.log.info( - "Test that peer is disconnected if it sends a filterclear message") + "Test that peer is disconnected if it sends a filterclear message" + ) self.test_message_causes_disconnect(msg_filterclear()) -if __name__ == '__main__': +if __name__ == "__main__": P2PNoBloomFilterMessages().main() diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py index d1d682c61..3b72baa4b 100755 --- a/test/functional/p2p_node_network_limited.py +++ b/test/functional/p2p_node_network_limited.py @@ -1,133 +1,138 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests NODE_NETWORK_LIMITED. Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly and that it responds to getdata requests for blocks correctly: - send a block within 288 + 2 of the tip - disconnect peers who request blocks older than that.""" from test_framework.messages import ( MSG_BLOCK, NODE_BLOOM, NODE_NETWORK_LIMITED, CInv, msg_getdata, msg_verack, ) from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class P2PIgnoreInv(P2PInterface): firstAddrnServices = 0 def on_inv(self, message): # The node will send us invs for other blocks. Ignore them. 
pass def on_addr(self, message): self.firstAddrnServices = message.addrs[0].nServices def wait_for_addr(self, timeout=5): - def test_function(): return self.last_message.get("addr") + def test_function(): + return self.last_message.get("addr") + self.wait_until(test_function, timeout=timeout) def send_getdata_for_block(self, blockhash): getdata_request = msg_getdata() getdata_request.inv.append(CInv(MSG_BLOCK, int(blockhash, 16))) self.send_message(getdata_request) class NodeNetworkLimitedTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 - self.extra_args = [['-prune=550', '-addrmantest'], [], []] + self.extra_args = [["-prune=550", "-addrmantest"], [], []] def disconnect_all(self): self.disconnect_nodes(0, 1) self.disconnect_nodes(0, 2) self.disconnect_nodes(1, 2) def setup_network(self): self.add_nodes(self.num_nodes, self.extra_args) self.start_nodes() def run_test(self): node = self.nodes[0].add_p2p_connection(P2PIgnoreInv()) expected_services = NODE_BLOOM | NODE_NETWORK_LIMITED self.log.info("Check that node has signalled expected services.") assert_equal(node.nServices, expected_services) self.log.info("Check that the localservices is as expected.") - assert_equal(int(self.nodes[0].getnetworkinfo()[ - 'localservices'], 16), expected_services) + assert_equal( + int(self.nodes[0].getnetworkinfo()["localservices"], 16), expected_services + ) - self.log.info( - "Mine enough blocks to reach the NODE_NETWORK_LIMITED range.") + self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.") self.connect_nodes(0, 1) - blocks = self.generate(self.nodes[1], 292, sync_fun=lambda: self.sync_blocks( - [self.nodes[0], self.nodes[1]])) + blocks = self.generate( + self.nodes[1], + 292, + sync_fun=lambda: self.sync_blocks([self.nodes[0], self.nodes[1]]), + ) self.log.info("Make sure we can retrieve the block at tip-288.") # last block in valid range node.send_getdata_for_block(blocks[1]) node.wait_for_block(int(blocks[1], 16), timeout=3) - self.log.info( - "Requesting block at height 2 (tip-289) must fail (ignored).") + self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).") # first block outside of the 288+2 limit node.send_getdata_for_block(blocks[0]) node.wait_for_disconnect(5) self.log.info("Check local address relay, do a fresh connection.") self.nodes[0].disconnect_p2ps() node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv()) node1.send_message(msg_verack()) node1.wait_for_addr() # must relay address with NODE_NETWORK_LIMITED assert_equal(node1.firstAddrnServices, expected_services) self.nodes[0].disconnect_p2ps() # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, # sync must not be possible self.connect_nodes(0, 2) try: self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5) except Exception: pass # node2 must remain at height 0 - assert_equal(self.nodes[2].getblockheader( - self.nodes[2].getbestblockhash())['height'], 0) + assert_equal( + self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())["height"], 0 + ) # now connect also to node 1 (non pruned) self.connect_nodes(1, 2) # sync must be possible self.sync_blocks() # disconnect all peers self.disconnect_all() # mine 10 blocks on node 0 (pruned node) self.generate(self.nodes[0], 10, sync_fun=self.no_op) # connect node1 (non pruned) with node0 (pruned) and check if they can # sync self.connect_nodes(0, 1) # sync must be possible, node 1 is no longer in IBD and
should # therefore connect to node 0 (NODE_NETWORK_LIMITED) self.sync_blocks([self.nodes[0], self.nodes[1]]) -if __name__ == '__main__': +if __name__ == "__main__": NodeNetworkLimitedTest().main() diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py index 29c22cf75..20097c50d 100755 --- a/test/functional/p2p_permissions.py +++ b/test/functional/p2p_permissions.py @@ -1,208 +1,219 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test p2p permission message. Test that permissions are correctly calculated and applied """ from test_framework.address import ADDRESS_ECREG_P2SH_OP_TRUE, SCRIPTSIG_OP_TRUE from test_framework.messages import CTransaction, FromHex from test_framework.p2p import P2PDataStore from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch from test_framework.txtools import pad_tx from test_framework.util import assert_equal, p2p_port class P2PPermissionsTests(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def run_test(self): self.check_tx_relay() self.checkpermission( # default permissions (no specific permissions) ["-whitelist=127.0.0.1"], # Make sure the default values in the command line documentation # match the ones here ["relay", "noban", "mempool", "download"], ) self.checkpermission( # check without deprecatedrpc=whitelisted ["-whitelist=127.0.0.1"], # Make sure the default values in the command line documentation # match the ones here ["relay", "noban", "mempool", "download"], ) self.checkpermission( # no permission (even with forcerelay) ["-whitelist=@127.0.0.1", "-whitelistforcerelay=1"], [], ) self.checkpermission( # relay permission removed (no specific permissions) ["-whitelist=127.0.0.1", "-whitelistrelay=0"], ["noban", "mempool", "download"], ) self.checkpermission( # forcerelay and relay permission added # Legacy parameter interaction which set whitelistrelay to true # if whitelistforcerelay is true ["-whitelist=127.0.0.1", "-whitelistforcerelay"], ["forcerelay", "relay", "noban", "mempool", "download"], ) # Let's make sure permissions are merged correctly # For this, we need to use whitebind instead of bind # by modifying the configuration file. 
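# Editor's illustration (not part of the patch): the merge verified below is effectively a union of the flags granted by each source, plus the interactions this test exercises elsewhere (forcerelay activates relay, and noban carries download with it). The names are the same permission strings the test compares against:
whitebind_flags = {"bloomfilter", "forcerelay"}  # from the whitebind line written below
whitelist_flags = {"noban"}                      # from -whitelist=noban@127.0.0.1
implied_flags = {"relay", "download"}            # forcerelay -> relay, noban -> download
assert whitebind_flags | whitelist_flags | implied_flags == {
    "noban", "bloomfilter", "forcerelay", "relay", "download"
}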
ip_port = f"127.0.0.1:{p2p_port(1)}" self.replaceinconfig( - 1, - "bind=127.0.0.1", - f"whitebind=bloomfilter,forcerelay@{ip_port}") + 1, "bind=127.0.0.1", f"whitebind=bloomfilter,forcerelay@{ip_port}" + ) self.checkpermission( ["-whitelist=noban@127.0.0.1"], # Check parameter interaction forcerelay should activate relay ["noban", "bloomfilter", "forcerelay", "relay", "download"], ) self.replaceinconfig( - 1, - f"whitebind=bloomfilter,forcerelay@{ip_port}", - "bind=127.0.0.1") + 1, f"whitebind=bloomfilter,forcerelay@{ip_port}", "bind=127.0.0.1" + ) self.checkpermission( # legacy whitelistrelay should be ignored ["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"], ["noban", "mempool", "download"], ) self.checkpermission( # check without deprecatedrpc=whitelisted ["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"], ["noban", "mempool", "download"], ) self.checkpermission( # legacy whitelistforcerelay should be ignored ["-whitelist=noban,mempool@127.0.0.1", "-whitelistforcerelay"], ["noban", "mempool", "download"], ) self.checkpermission( # missing mempool permission to be considered legacy whitelisted ["-whitelist=noban@127.0.0.1"], ["noban", "download"], ) self.checkpermission( # all permission added ["-whitelist=all@127.0.0.1"], - ["forcerelay", "noban", "mempool", "bloomfilter", - "relay", "download", "bypass_proof_request_limits", "addr"], + [ + "forcerelay", + "noban", + "mempool", + "bloomfilter", + "relay", + "download", + "bypass_proof_request_limits", + "addr", + ], ) self.checkpermission( # bypass_proof_request_limits permission ["-whitelist=bypass_proof_request_limits@127.0.0.1"], ["bypass_proof_request_limits"], ) self.stop_node(1) self.nodes[1].assert_start_raises_init_error( ["-whitelist=oopsie@127.0.0.1"], "Invalid P2P permission", - match=ErrorMatch.PARTIAL_REGEX) + match=ErrorMatch.PARTIAL_REGEX, + ) self.nodes[1].assert_start_raises_init_error( ["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", - match=ErrorMatch.PARTIAL_REGEX) + match=ErrorMatch.PARTIAL_REGEX, + ) self.nodes[1].assert_start_raises_init_error( ["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", - match=ErrorMatch.PARTIAL_REGEX) + match=ErrorMatch.PARTIAL_REGEX, + ) def check_tx_relay(self): block_op_true = self.nodes[0].getblock( - self.generatetoaddress(self.nodes[0], 100, ADDRESS_ECREG_P2SH_OP_TRUE)[0]) + self.generatetoaddress(self.nodes[0], 100, ADDRESS_ECREG_P2SH_OP_TRUE)[0] + ) self.log.debug( - "Create a connection from a forcerelay peer that rebroadcasts raw txs") + "Create a connection from a forcerelay peer that rebroadcasts raw txs" + ) # A python mininode is needed to send the raw transaction directly. # If a full node was used, it could only rebroadcast via the inv-getdata # mechanism. However, even for forcerelay connections, a full node would # currently not request a txid that is already in the mempool. 
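# Editor's illustration (not part of the patch): the inv/getdata path ruled out above would only announce the txid, roughly like this (MSG_TX, CInv and msg_inv are not imported by this file, and the txid is a placeholder):
from test_framework.messages import MSG_TX, CInv, msg_inv
example_txid = 0xDEADBEEF  # placeholder value, not a real transaction id
announcement = msg_inv([CInv(MSG_TX, example_txid)])
# A node only answers such an announcement with getdata for transactions it does
# not already have, so a txid that is already in its mempool is never fetched
# again; pushing the raw transaction through P2PDataStore below avoids that.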
self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"]) - p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection( - P2PDataStore()) + p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore()) self.log.debug("Send a tx from the wallet initially") - tx = FromHex(CTransaction(), - self.nodes[0].createrawtransaction( - inputs=[{'txid': block_op_true['tx'][0], 'vout': 0}], - outputs=[{ADDRESS_ECREG_P2SH_OP_TRUE: 50}])) + tx = FromHex( + CTransaction(), + self.nodes[0].createrawtransaction( + inputs=[{"txid": block_op_true["tx"][0], "vout": 0}], + outputs=[{ADDRESS_ECREG_P2SH_OP_TRUE: 50}], + ), + ) # push the one byte script to the stack tx.vin[0].scriptSig = SCRIPTSIG_OP_TRUE pad_tx(tx) txid = tx.rehash() self.log.debug("Wait until tx is in node[1]'s mempool") p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1]) self.log.debug( "Check that node[1] will send the tx to node[0] even though it" - " is already in the mempool") + " is already in the mempool" + ) self.connect_nodes(1, 0) - with self.nodes[1].assert_debug_log( - [f"Force relaying tx {txid} from peer=0"]): + with self.nodes[1].assert_debug_log([f"Force relaying tx {txid} from peer=0"]): p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1]) self.wait_until(lambda: txid in self.nodes[0].getrawmempool()) - self.log.debug( - "Check that node[1] will not send an invalid tx to node[0]") + self.log.debug("Check that node[1] will not send an invalid tx to node[0]") tx.vout[0].nValue += 1 txid = tx.rehash() # Send the transaction twice. The first time, it'll be rejected by ATMP # because it conflicts with a mempool transaction. The second time, # it'll be in the m_recent_rejects filter. p2p_rebroadcast_wallet.send_txs_and_test( [tx], self.nodes[1], success=False, - reject_reason=f'{txid} from peer=0 was not accepted: ' - f'txn-mempool-conflict', + reject_reason=f"{txid} from peer=0 was not accepted: txn-mempool-conflict", ) p2p_rebroadcast_wallet.send_txs_and_test( [tx], self.nodes[1], success=False, - reject_reason=f'Not relaying non-mempool transaction {txid} from ' - f'forcerelay peer=0', + reject_reason=( + f"Not relaying non-mempool transaction {txid} from forcerelay peer=0" + ), ) def checkpermission(self, args, expectedPermissions): self.restart_node(1, args) self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] - assert_equal(len(expectedPermissions), len(peerinfo['permissions'])) + assert_equal(len(expectedPermissions), len(peerinfo["permissions"])) for p in expectedPermissions: - if p not in peerinfo['permissions']: - raise AssertionError( - f"Expected permissions {p!r} is not granted.") + if p not in peerinfo["permissions"]: + raise AssertionError(f"Expected permissions {p!r} is not granted.") def replaceinconfig(self, nodeid, old, new): with open(self.nodes[nodeid].bitcoinconf, encoding="utf8") as f: newText = f.read().replace(old, new) - with open(self.nodes[nodeid].bitcoinconf, 'w', encoding="utf8") as f: + with open(self.nodes[nodeid].bitcoinconf, "w", encoding="utf8") as f: f.write(newText) -if __name__ == '__main__': +if __name__ == "__main__": P2PPermissionsTests().main() diff --git a/test/functional/p2p_ping.py b/test/functional/p2p_ping.py index fc4310551..81a43a317 100755 --- a/test/functional/p2p_ping.py +++ b/test/functional/p2p_ping.py @@ -1,137 +1,137 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test ping message """ import time from test_framework.messages import msg_pong from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal PING_INTERVAL = 2 * 60 class msg_pong_corrupt(msg_pong): def serialize(self): return b"" class NodePongAdd1(P2PInterface): def on_ping(self, message): self.send_message(msg_pong(message.nonce + 1)) class NodeNoPong(P2PInterface): def on_ping(self, message): pass TIMEOUT_INTERVAL = 20 * 60 class PingPongTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # Set the peer connection timeout low. It does not matter for this # test, as long as it is less than TIMEOUT_INTERVAL. - self.extra_args = [['-peertimeout=1']] + self.extra_args = [["-peertimeout=1"]] def check_peer_info(self, *, pingtime, minping, pingwait): stats = self.nodes[0].getpeerinfo()[0] - assert_equal(stats.pop('pingtime', None), pingtime) - assert_equal(stats.pop('minping', None), minping) - assert_equal(stats.pop('pingwait', None), pingwait) + assert_equal(stats.pop("pingtime", None), pingtime) + assert_equal(stats.pop("minping", None), minping) + assert_equal(stats.pop("pingwait", None), pingwait) def mock_forward(self, delta): self.mock_time += delta self.nodes[0].setmocktime(self.mock_time) def run_test(self): self.mock_time = int(time.time()) self.mock_forward(0) - self.log.info( - 'Check that ping is sent after connection is established') + self.log.info("Check that ping is sent after connection is established") no_pong_node = self.nodes[0].add_p2p_connection(NodeNoPong()) self.mock_forward(3) - assert no_pong_node.last_message.pop('ping').nonce != 0 + assert no_pong_node.last_message.pop("ping").nonce != 0 self.check_peer_info(pingtime=None, minping=None, pingwait=3) - self.log.info('Reply without nonce cancels ping') - with self.nodes[0].assert_debug_log(['pong peer=0: Short payload']): + self.log.info("Reply without nonce cancels ping") + with self.nodes[0].assert_debug_log(["pong peer=0: Short payload"]): no_pong_node.send_and_ping(msg_pong_corrupt()) self.check_peer_info(pingtime=None, minping=None, pingwait=None) - self.log.info('Reply without ping') - with self.nodes[0].assert_debug_log([ - 'pong peer=0: Unsolicited pong without ping, 0 expected, 0 received, 8 bytes', - ]): + self.log.info("Reply without ping") + with self.nodes[0].assert_debug_log( + [ + ( + "pong peer=0: Unsolicited pong without ping, 0 expected, 0" + " received, 8 bytes" + ), + ] + ): no_pong_node.send_and_ping(msg_pong()) self.check_peer_info(pingtime=None, minping=None, pingwait=None) - self.log.info('Reply with wrong nonce does not cancel ping') - assert 'ping' not in no_pong_node.last_message - with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']): + self.log.info("Reply with wrong nonce does not cancel ping") + assert "ping" not in no_pong_node.last_message + with self.nodes[0].assert_debug_log(["pong peer=0: Nonce mismatch"]): # mock time PING_INTERVAL ahead to trigger node into sending a ping self.mock_forward(PING_INTERVAL + 1) - no_pong_node.wait_until( - lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: "ping" in no_pong_node.last_message) self.mock_forward(9) # Send the wrong pong no_pong_node.send_and_ping( - msg_pong(no_pong_node.last_message.pop('ping').nonce - 1)) + msg_pong(no_pong_node.last_message.pop("ping").nonce - 1) + ) self.check_peer_info(pingtime=None, minping=None, pingwait=9) - 
self.log.info('Reply with zero nonce does cancel ping') - with self.nodes[0].assert_debug_log(['pong peer=0: Nonce zero']): + self.log.info("Reply with zero nonce does cancel ping") + with self.nodes[0].assert_debug_log(["pong peer=0: Nonce zero"]): no_pong_node.send_and_ping(msg_pong(0)) self.check_peer_info(pingtime=None, minping=None, pingwait=None) - self.log.info('Check that ping is properly reported on RPC') - assert 'ping' not in no_pong_node.last_message + self.log.info("Check that ping is properly reported on RPC") + assert "ping" not in no_pong_node.last_message # mock time PING_INTERVAL ahead to trigger node into sending a ping self.mock_forward(PING_INTERVAL + 1) - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: "ping" in no_pong_node.last_message) ping_delay = 29 self.mock_forward(ping_delay) - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: "ping" in no_pong_node.last_message) no_pong_node.send_and_ping( - msg_pong(no_pong_node.last_message.pop('ping').nonce)) - self.check_peer_info( - pingtime=ping_delay, - minping=ping_delay, - pingwait=None) + msg_pong(no_pong_node.last_message.pop("ping").nonce) + ) + self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None) - self.log.info('Check that minping is decreased after a fast roundtrip') + self.log.info("Check that minping is decreased after a fast roundtrip") # mock time PING_INTERVAL ahead to trigger node into sending a ping self.mock_forward(PING_INTERVAL + 1) - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: "ping" in no_pong_node.last_message) ping_delay = 9 self.mock_forward(ping_delay) - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: "ping" in no_pong_node.last_message) no_pong_node.send_and_ping( - msg_pong(no_pong_node.last_message.pop('ping').nonce)) - self.check_peer_info( - pingtime=ping_delay, - minping=ping_delay, - pingwait=None) - - self.log.info('Check that peer is disconnected after ping timeout') - assert 'ping' not in no_pong_node.last_message + msg_pong(no_pong_node.last_message.pop("ping").nonce) + ) + self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None) + + self.log.info("Check that peer is disconnected after ping timeout") + assert "ping" not in no_pong_node.last_message self.nodes[0].ping() - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) - with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']): + no_pong_node.wait_until(lambda: "ping" in no_pong_node.last_message) + with self.nodes[0].assert_debug_log(["ping timeout: 1201.000000s"]): self.mock_forward(TIMEOUT_INTERVAL // 2) # Check that sending a ping does not prevent the disconnect no_pong_node.sync_with_ping() self.mock_forward(TIMEOUT_INTERVAL // 2 + 1) no_pong_node.wait_for_disconnect() -if __name__ == '__main__': +if __name__ == "__main__": PingPongTest().main() diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py index 5616269c5..dcadd65db 100755 --- a/test/functional/p2p_sendheaders.py +++ b/test/functional/p2p_sendheaders.py @@ -1,628 +1,647 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test behavior of headers messages to announce blocks. 
Setup: - Two nodes: - node0 is the node-under-test. We create two p2p connections to it. The first p2p connection is a control and should only ever receive inv's. The second p2p connection tests the headers sending logic. - node1 is used to create reorgs. test_null_locators ================== Sends two getheaders requests with null locator values. First request's hashstop value refers to validated block, while second request's hashstop value refers to a block which hasn't been validated. Verifies only the first request returns headers. test_nonnull_locators ===================== Part 1: No headers announcements before "sendheaders" a. node mines a block [expect: inv] send getdata for the block [expect: block] b. node mines another block [expect: inv] send getheaders and getdata [expect: headers, then block] c. node mines another block [expect: inv] peer mines a block, announces with header [expect: getdata] d. node mines another block [expect: inv] Part 2: After "sendheaders", headers announcements should generally work. a. peer sends sendheaders [expect: no response] peer sends getheaders with current tip [expect: no response] b. node mines a block [expect: tip header] c. for N in 1, ..., 10: * for announce-type in {inv, header} - peer mines N blocks, announces with announce-type [ expect: getheaders/getdata or getdata, deliver block(s) ] - node mines a block [ expect: 1 header ] Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. - For response-type in {inv, getheaders} * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] * node mines an 8-block reorg [ expect: inv at tip ] * peer responds with getblocks/getdata [expect: inv, blocks ] * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] * node mines another block at tip [ expect: inv ] * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] * peer requests block [ expect: block ] * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] * node mines 1 block [expect: 1 header, peer responds with getdata] Part 4: Test direct fetch behavior a. Announce 2 old block headers. Expect: no getdata requests. b. Announce 3 new blocks via 1 headers message. Expect: one getdata request for all 3 blocks. (Send blocks.) c. Announce 1 header that forks off the last two blocks. Expect: no response. d. Announce 1 more header that builds on that fork. Expect: one getdata request for two blocks. e. Announce 16 more headers that build on that fork. Expect: getdata request for 14 more blocks. f. Announce 1 more header that builds on that fork. Expect: no response. Part 5: Test handling of headers that don't connect. a. Repeat 10 times: 1. Announce a header that doesn't connect. Expect: getheaders message 2. Send headers chain. Expect: getdata for the missing blocks, tip update. b. Then send 9 more headers that don't connect. Expect: getheaders message each time. c. Announce a header that does connect. Expect: no response. d. Announce 49 headers that don't connect. Expect: getheaders message each time. e. Announce one more that doesn't connect. Expect: disconnect. 
""" from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( MSG_BLOCK, CBlockHeader, CInv, msg_block, msg_getblocks, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendheaders, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, uint256_hex DIRECT_FETCH_RESPONSE_TIME = 0.05 class BaseNode(P2PInterface): def __init__(self): super().__init__() self.block_announced = False self.last_blockhash_announced = None self.recent_headers_announced = [] def send_get_data(self, block_hashes): """Request data for a list of block hashes.""" msg = msg_getdata() for x in block_hashes: msg.inv.append(CInv(MSG_BLOCK, x)) self.send_message(msg) def send_get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.send_message(msg) def send_block_inv(self, blockhash): msg = msg_inv() msg.inv = [CInv(MSG_BLOCK, blockhash)] self.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def send_getblocks(self, locator): getblocks_message = msg_getblocks() getblocks_message.locator.vHave = locator self.send_message(getblocks_message) def wait_for_block_announcement(self, block_hash, timeout=60): - def test_function(): return self.last_blockhash_announced == block_hash + def test_function(): + return self.last_blockhash_announced == block_hash + self.wait_until(test_function, timeout=timeout) def on_inv(self, message): self.block_announced = True self.last_blockhash_announced = message.inv[-1].hash def on_headers(self, message): if len(message.headers): self.block_announced = True for x in message.headers: x.calc_sha256() # append because headers may be announced over multiple # messages. self.recent_headers_announced.append(x.sha256) self.last_blockhash_announced = message.headers[-1].sha256 def clear_block_announcements(self): with p2p_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.recent_headers_announced = [] def check_last_headers_announcement(self, headers): """Test whether the last headers announcements received are right. - Headers may be announced across more than one message.""" + Headers may be announced across more than one message.""" + + def test_function(): + return len(self.recent_headers_announced) >= len(headers) - def test_function(): return (len(self.recent_headers_announced) >= len(headers)) self.wait_until(test_function) with p2p_lock: assert_equal(self.recent_headers_announced, headers) self.block_announced = False self.last_message.pop("headers", None) self.recent_headers_announced = [] def check_last_inv_announcement(self, inv): """Test whether the last announcement received had the right inv. 
inv should be a list of block hashes.""" - def test_function(): return self.block_announced + def test_function(): + return self.block_announced + self.wait_until(test_function) with p2p_lock: compare_inv = [] if "inv" in self.last_message: compare_inv = [x.hash for x in self.last_message["inv"].inv] assert_equal(compare_inv, inv) self.block_announced = False self.last_message.pop("inv", None) class SendHeadersTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"]] def mine_blocks(self, count): """Mine count blocks and return the new tip.""" # Clear out block announcements from each p2p listener [x.clear_block_announcements() for x in self.nodes[0].p2ps] - self.generatetoaddress(self.nodes[0], - count, self.nodes[0].get_deterministic_priv_key().address) + self.generatetoaddress( + self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address + ) return int(self.nodes[0].getbestblockhash(), 16) def mine_reorg(self, length): """Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks). Note: we clear the state of our p2p connections after the to-be-reorged-out blocks are mined, so that we don't break later tests. return the list of block hashes newly mined.""" # make sure all invalidated blocks are node0's - self.generatetoaddress(self.nodes[0], - length, self.nodes[0].get_deterministic_priv_key().address) + self.generatetoaddress( + self.nodes[0], length, self.nodes[0].get_deterministic_priv_key().address + ) for x in self.nodes[0].p2ps: - x.wait_for_block_announcement( - int(self.nodes[0].getbestblockhash(), 16)) + x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16)) x.clear_block_announcements() tip_height = self.nodes[1].getblockcount() - hash_to_invalidate = self.nodes[1].getblockhash( - tip_height - (length - 1)) + hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1)) self.nodes[1].invalidateblock(hash_to_invalidate) # Must be longer than the orig chain - all_hashes = self.generatetoaddress(self.nodes[1], - length + 1, self.nodes[1].get_deterministic_priv_key().address) + all_hashes = self.generatetoaddress( + self.nodes[1], + length + 1, + self.nodes[1].get_deterministic_priv_key().address, + ) return [int(x, 16) for x in all_hashes] def run_test(self): # Setup the p2p connections inv_node = self.nodes[0].add_p2p_connection(BaseNode()) # Make sure NODE_NETWORK is not set for test_node, so no block download # will occur outside of direct fetching test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=0) self.test_null_locators(test_node, inv_node) self.test_nonnull_locators(test_node, inv_node) def test_null_locators(self, test_node, inv_node): - tip = self.nodes[0].getblockheader(self.generatetoaddress(self.nodes[0], - 1, self.nodes[0].get_deterministic_priv_key().address)[0]) + tip = self.nodes[0].getblockheader( + self.generatetoaddress( + self.nodes[0], 1, self.nodes[0].get_deterministic_priv_key().address + )[0] + ) tip_hash = int(tip["hash"], 16) inv_node.check_last_inv_announcement(inv=[tip_hash]) test_node.check_last_inv_announcement(inv=[tip_hash]) self.log.info( - "Verify getheaders with null locator and valid hashstop returns headers.") + "Verify getheaders with null locator and valid hashstop returns headers." 
+ ) test_node.clear_block_announcements() test_node.send_get_headers(locator=[], hashstop=tip_hash) test_node.check_last_headers_announcement(headers=[tip_hash]) self.log.info( - "Verify getheaders with null locator and invalid hashstop does not return headers.") - block = create_block(int(tip["hash"], 16), create_coinbase( - tip["height"] + 1), tip["mediantime"] + 1) + "Verify getheaders with null locator and invalid hashstop does not return" + " headers." + ) + block = create_block( + int(tip["hash"], 16), + create_coinbase(tip["height"] + 1), + tip["mediantime"] + 1, + ) block.solve() test_node.send_header_for_blocks([block]) test_node.clear_block_announcements() test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16)) test_node.sync_with_ping() assert_equal(test_node.block_announced, False) inv_node.clear_block_announcements() test_node.send_message(msg_block(block)) inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)]) def test_nonnull_locators(self, test_node, inv_node): tip = int(self.nodes[0].getbestblockhash(), 16) # PART 1 # 1. Mine a block; expect inv announcements each time - self.log.info( - "Part 1: headers don't start before sendheaders message...") + self.log.info("Part 1: headers don't start before sendheaders message...") for i in range(4): self.log.debug(f"Part 1.{i}: starting...") old_tip = tip tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_inv_announcement(inv=[tip]) # Try a few different responses; none should affect next # announcement if i == 0: # first request the block test_node.send_get_data([tip]) test_node.wait_for_block(tip) elif i == 1: # next try requesting header and block test_node.send_get_headers(locator=[old_tip], hashstop=tip) test_node.send_get_data([tip]) test_node.wait_for_block(tip) # since we requested headers... test_node.clear_block_announcements() elif i == 2: # this time announce own block via headers inv_node.clear_block_announcements() height = self.nodes[0].getblockcount() - last_time = self.nodes[0].getblock( - self.nodes[0].getbestblockhash())['time'] + last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())[ + "time" + ] block_time = last_time + 1 - new_block = create_block( - tip, create_coinbase(height + 1), block_time) + new_block = create_block(tip, create_coinbase(height + 1), block_time) new_block.solve() test_node.send_header_for_blocks([new_block]) test_node.wait_for_getdata([new_block.sha256]) # make sure this block is processed test_node.send_and_ping(msg_block(new_block)) inv_node.wait_until(lambda: inv_node.block_announced) inv_node.clear_block_announcements() test_node.clear_block_announcements() self.log.info("Part 1: success!") self.log.info( - "Part 2: announce blocks with headers after sendheaders message...") + "Part 2: announce blocks with headers after sendheaders message..." + ) # PART 2 # 2. Send a sendheaders message and test that headers announcements # commence and keep working. 
test_node.send_message(msg_sendheaders()) prev_tip = int(self.nodes[0].getbestblockhash(), 16) test_node.send_get_headers(locator=[prev_tip], hashstop=0) test_node.sync_with_ping() # Now that we've synced headers, headers announcements should work tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=[tip]) height = self.nodes[0].getblockcount() + 1 block_time += 10 # Advance far enough ahead for i in range(10): self.log.debug(f"Part 2.{i}: starting...") # Mine i blocks, and alternate announcing either via # inv (of tip) or via headers. After each, new blocks # mined by the node should successfully be announced # with block header, even though the blocks are never requested for j in range(2): self.log.debug(f"Part 2.{i}.{j}: starting...") blocks = [] for _ in range(i + 1): - blocks.append(create_block( - tip, create_coinbase(height), block_time)) + blocks.append( + create_block(tip, create_coinbase(height), block_time) + ) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 if j == 0: # Announce via inv test_node.send_block_inv(tip) test_node.wait_for_getheaders() # Should have received a getheaders now test_node.send_header_for_blocks(blocks) # Test that duplicate inv's won't result in duplicate # getdata requests, or duplicate headers announcements [inv_node.send_block_inv(x.sha256) for x in blocks] test_node.wait_for_getdata([x.sha256 for x in blocks]) inv_node.sync_with_ping() else: # Announce via headers test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) # Test that duplicate headers won't result in duplicate # getdata requests (the check is further down) inv_node.send_header_for_blocks(blocks) inv_node.sync_with_ping() [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() inv_node.sync_with_ping() # This block should not be announced to the inv node (since it also # broadcast it) assert "inv" not in inv_node.last_message assert "headers" not in inv_node.last_message tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=[tip]) height += 1 block_time += 1 self.log.info("Part 2: success!") self.log.info( - "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") + "Part 3: headers announcements can stop after large reorg, and resume after" + " headers/inv from peer..." + ) # PART 3. Headers announcements can stop after large reorg, and resume after # getheaders or inv from peer. 
for j in range(2): self.log.debug(f"Part 3.{j}: starting...") # First try mining a reorg that can propagate with header # announcement new_block_hashes = self.mine_reorg(length=7) tip = new_block_hashes[-1] inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=new_block_hashes) block_time += 8 # Mine a too-large reorg, which should be announced with a single # inv new_block_hashes = self.mine_reorg(length=8) tip = new_block_hashes[-1] inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_inv_announcement(inv=[tip]) block_time += 9 - fork_point = self.nodes[0].getblock( - uint256_hex(new_block_hashes[0]))["previousblockhash"] + fork_point = self.nodes[0].getblock(uint256_hex(new_block_hashes[0]))[ + "previousblockhash" + ] fork_point = int(fork_point, 16) # Use getblocks/getdata test_node.send_getblocks(locator=[fork_point]) test_node.check_last_inv_announcement(inv=new_block_hashes) test_node.send_get_data(new_block_hashes) test_node.wait_for_block(new_block_hashes[-1]) for i in range(3): self.log.debug(f"Part 3.{j}.{i}: starting...") # Mine another block, still should get only an inv tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_inv_announcement(inv=[tip]) if i == 0: # Just get the data -- shouldn't cause headers # announcements to resume test_node.send_get_data([tip]) test_node.wait_for_block(tip) elif i == 1: # Send a getheaders message that shouldn't trigger headers announcements # to resume (best header sent will be too old) test_node.send_get_headers( - locator=[fork_point], hashstop=new_block_hashes[1]) + locator=[fork_point], hashstop=new_block_hashes[1] + ) test_node.send_get_data([tip]) test_node.wait_for_block(tip) elif i == 2: # This time, try sending either a getheaders to trigger resumption # of headers announcements, or mine a new block and inv it, also # triggering resumption of headers announcements. test_node.send_get_data([tip]) test_node.wait_for_block(tip) if j == 0: test_node.send_get_headers(locator=[tip], hashstop=0) test_node.sync_with_ping() else: test_node.send_block_inv(tip) test_node.sync_with_ping() # New blocks should now be announced with header tip = self.mine_blocks(1) inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=[tip]) self.log.info("Part 3: success!") self.log.info("Part 4: Testing direct fetch behavior...") tip = self.mine_blocks(1) height = self.nodes[0].getblockcount() + 1 - last_time = self.nodes[0].getblock( - self.nodes[0].getbestblockhash())['time'] + last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())["time"] block_time = last_time + 1 # Create 2 blocks. Send the blocks, then send the headers. 
blocks = [] for _ in range(2): - blocks.append(create_block( - tip, create_coinbase(height), block_time)) + blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 inv_node.send_message(msg_block(blocks[-1])) inv_node.sync_with_ping() # Make sure blocks are processed test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() # should not have received any getdata messages with p2p_lock: assert "getdata" not in test_node.last_message # This time, direct fetch should work blocks = [] for _ in range(3): - blocks.append(create_block( - tip, create_coinbase(height), block_time)) + blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() test_node.wait_for_getdata( - [x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME) + [x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME + ) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() # Now announce a header that forks the last two blocks tip = blocks[0].sha256 height -= 2 blocks = [] # Create extra blocks for later for _ in range(20): - blocks.append(create_block( - tip, create_coinbase(height), block_time)) + blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Announcing one block on fork should not trigger direct fetch # (less work than tip) test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[0:1]) test_node.sync_with_ping() with p2p_lock: assert "getdata" not in test_node.last_message # Announcing one more block on fork should trigger direct fetch for # both blocks (same work as tip) test_node.send_header_for_blocks(blocks[1:2]) test_node.sync_with_ping() test_node.wait_for_getdata( - [x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME) + [x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME + ) # Announcing 16 more headers should trigger direct fetch for 14 more # blocks test_node.send_header_for_blocks(blocks[2:18]) test_node.sync_with_ping() test_node.wait_for_getdata( - [x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME) + [x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME + ) # Announcing 1 more header should not trigger any response test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[18:19]) test_node.sync_with_ping() with p2p_lock: assert "getdata" not in test_node.last_message self.log.info("Part 4: success!") # Now deliver all those blocks we announced. [test_node.send_message(msg_block(x)) for x in blocks] self.log.info("Part 5: Testing handling of unconnecting headers") # First we test that receipt of an unconnecting header doesn't prevent # chain sync. for i in range(10): self.log.debug(f"Part 5.{i}: starting...") test_node.last_message.pop("getdata", None) blocks = [] # Create two more blocks. for _ in range(2): - blocks.append(create_block( - tip, create_coinbase(height), block_time)) + blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Send the header of the second block -> this won't connect. 
with p2p_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[1]]) test_node.wait_for_getheaders() test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() - assert_equal( - int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) blocks = [] # Now we test that if we repeatedly don't send connecting headers, we # don't go into an infinite loop trying to get them to connect. MAX_UNCONNECTING_HEADERS = 10 for _ in range(MAX_UNCONNECTING_HEADERS + 1): - blocks.append(create_block( - tip, create_coinbase(height), block_time)) + blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 for i in range(1, MAX_UNCONNECTING_HEADERS): # Send a header that doesn't connect, check that we get a # getheaders. with p2p_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i]]) test_node.wait_for_getheaders() # Next header will connect, should re-set our count: test_node.send_header_for_blocks([blocks[0]]) # Remove the first two entries (blocks[1] would connect): blocks = blocks[2:] # Now try to see how many unconnecting headers we can send # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS for i in range(5 * MAX_UNCONNECTING_HEADERS - 1): # Send a header that doesn't connect, check that we get a # getheaders. with p2p_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i % len(blocks)]]) test_node.wait_for_getheaders() # Eventually this stops working. test_node.send_header_for_blocks([blocks[-1]]) # Should get disconnected test_node.wait_for_disconnect() self.log.info("Part 5: success!") # Finally, check that the inv node never received a getdata request, # throughout the test assert "getdata" not in inv_node.last_message -if __name__ == '__main__': +if __name__ == "__main__": SendHeadersTest().main() diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py index ba56d0df4..bea35d1f5 100755 --- a/test/functional/p2p_timeouts.py +++ b/test/functional/p2p_timeouts.py @@ -1,97 +1,100 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test various net timeouts. - Create three peers: no_verack_node - we never send a verack in response to their version no_version_node - we never send a version (only a ping) no_send_node - we never send any P2P message. 
- Wait 1 second - Assert that we're connected - Send a ping to no_verack_node and no_version_node - Wait 1 second - Assert that we're still connected - Send a ping to no_verack_node and no_version_node - Wait 2 seconds - Assert that we're no longer connected (timeout to receive version/verack is 3 seconds) """ import time from test_framework.messages import msg_ping from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework class TestP2PConn(P2PInterface): def on_version(self, message): # Don't send a verack in response pass class TimeoutsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # set timeout to receive version/verack to 3 seconds self.extra_args = [["-peertimeout=3"]] def mock_forward(self, delta): self.mock_time += delta self.nodes[0].setmocktime(self.mock_time) def run_test(self): self.mock_time = int(time.time()) self.mock_forward(0) # Setup the p2p connections no_verack_node = self.nodes[0].add_p2p_connection( - TestP2PConn(), wait_for_verack=False) + TestP2PConn(), wait_for_verack=False + ) no_version_node = self.nodes[0].add_p2p_connection( - TestP2PConn(), send_version=False, wait_for_verack=False) + TestP2PConn(), send_version=False, wait_for_verack=False + ) no_send_node = self.nodes[0].add_p2p_connection( - TestP2PConn(), send_version=False, wait_for_verack=False) + TestP2PConn(), send_version=False, wait_for_verack=False + ) # Wait until we got the verack in response to the version. Though, don't wait for the other node to receive the # verack, since we never sent one no_verack_node.wait_for_verack() self.mock_forward(1) assert no_verack_node.is_connected assert no_version_node.is_connected assert no_send_node.is_connected no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) self.mock_forward(1) assert "version" in no_verack_node.last_message assert no_verack_node.is_connected assert no_version_node.is_connected assert no_send_node.is_connected no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) expected_timeout_logs = [ "version handshake timeout peer=0", "socket no message in first 3 seconds, 1 0 peer=1", "socket no message in first 3 seconds, 0 0 peer=2", ] with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs): self.mock_forward(2) no_verack_node.wait_for_disconnect(timeout=1) no_version_node.wait_for_disconnect(timeout=1) no_send_node.wait_for_disconnect(timeout=1) -if __name__ == '__main__': +if __name__ == "__main__": TimeoutsTest().main() diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py index 616005e56..6e78d02f7 100755 --- a/test/functional/p2p_unrequested_blocks.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -1,328 +1,338 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of unrequested blocks. Setup: two nodes, node0 + node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. We have one P2PInterface connection to node0 called test_node, and one to node1 called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. 
The tip should advance for node0, but node1 should skip processing due to nMinimumChainWork. Node1 is unused in tests 3-7: 3. Mine a block that forks from the genesis block, and deliver to test_node. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more or equal work to the tip. 4a,b. Send another two blocks that build on the forking block. Node0 should process the second block but be stuck on the shorter chain, because it's missing an intermediate block. 4c.Send 288 more blocks on the longer chain (the number of blocks ahead we currently store). Node0 should process all but the last block (too far ahead in height). 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. 8. Create a fork which is invalid at a height longer than the current chain (ie to which the node will try to reorg) but which has headers built on top of the invalid block. Check that we get disconnected if we send more headers on the chain the node now knows to be invalid. 9. Test Node1 is able to sync when connected to node0 (which should have sufficient work on its chain). """ import time from test_framework.blocktools import ( create_block, create_coinbase, create_tx_with_script, ) from test_framework.messages import ( MSG_BLOCK, CBlockHeader, CInv, msg_block, msg_headers, msg_inv, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class AcceptBlockTest(BitcoinTestFramework): - def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 - self.extra_args = [["-noparkdeepreorg"], - ["-minimumchainwork=0x10"]] + self.extra_args = [["-noparkdeepreorg"], ["-minimumchainwork=0x10"]] def setup_network(self): self.setup_nodes() def run_test(self): test_node = self.nodes[0].add_p2p_connection(P2PInterface()) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) # 1. Have nodes mine a block (leave IBD) [self.generate(n, 1, sync_fun=self.no_op) for n in self.nodes] tips = [int(n.getbestblockhash(), 16) for n in self.nodes] # 2. Send one block that builds on each tip. # This should be accepted by node0 blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): - blocks_h2.append(create_block( - tips[i], create_coinbase(2), block_time)) + blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_and_ping(msg_block(blocks_h2[0])) min_work_node.send_and_ping(msg_block(blocks_h2[1])) assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 1) self.log.info( - "First height 2 block accepted by node0; correctly rejected by node1") + "First height 2 block accepted by node0; correctly rejected by node1" + ) # 3. Send another block that builds on genesis. 
block_h1f = create_block( - int(self.nodes[0].getblockhash(0), 16), create_coinbase(1), block_time) + int(self.nodes[0].getblockhash(0), 16), create_coinbase(1), block_time + ) block_time += 1 block_h1f.solve() test_node.send_and_ping(msg_block(block_h1f)) tip_entry_found = False for x in self.nodes[0].getchaintips(): - if x['hash'] == block_h1f.hash: - assert_equal(x['status'], "headers-only") + if x["hash"] == block_h1f.hash: + assert_equal(x["status"], "headers-only") tip_entry_found = True assert tip_entry_found - assert_raises_rpc_error(-1, "Block not found on disk", - self.nodes[0].getblock, block_h1f.hash) + assert_raises_rpc_error( + -1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash + ) # 4. Send another two block that build on the fork. - block_h2f = create_block( - block_h1f.sha256, create_coinbase(2), block_time) + block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time) block_time += 1 block_h2f.solve() test_node.send_and_ping(msg_block(block_h2f)) # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): - if x['hash'] == block_h2f.hash: - assert_equal(x['status'], "headers-only") + if x["hash"] == block_h2f.hash: + assert_equal(x["status"], "headers-only") tip_entry_found = True assert tip_entry_found # But this block should be accepted by node since it has equal work. self.nodes[0].getblock(block_h2f.hash) self.log.info("Second height 2 block accepted, but not reorg'ed to") # 4b. Now send another block that builds on the forking chain. block_h3 = create_block( - block_h2f.sha256, create_coinbase(3), block_h2f.nTime + 1) + block_h2f.sha256, create_coinbase(3), block_h2f.nTime + 1 + ) block_h3.solve() test_node.send_and_ping(msg_block(block_h3)) # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): - if x['hash'] == block_h3.hash: - assert_equal(x['status'], "headers-only") + if x["hash"] == block_h3.hash: + assert_equal(x["status"], "headers-only") tip_entry_found = True assert tip_entry_found self.nodes[0].getblock(block_h3.hash) # But this block should be accepted by node since it has more work. self.nodes[0].getblock(block_h3.hash) self.log.info("Unrequested more-work block accepted") # 4c. 
Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node (as long as it is not missing any # headers) tip = block_h3 all_blocks = [] for i in range(288): - next_block = create_block( - tip.sha256, create_coinbase(i + 4), tip.nTime + 1) + next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime + 1) next_block.solve() all_blocks.append(next_block) tip = next_block # Now send the block at height 5 and check that it wasn't accepted # (missing header) test_node.send_and_ping(msg_block(all_blocks[1])) - assert_raises_rpc_error(-5, "Block not found", - self.nodes[0].getblock, all_blocks[1].hash) - assert_raises_rpc_error(-5, "Block not found", - self.nodes[0].getblockheader, all_blocks[1].hash) + assert_raises_rpc_error( + -5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash + ) + assert_raises_rpc_error( + -5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash + ) # The block at height 5 should be accepted if we provide the missing # header, though headers_message = msg_headers() headers_message.headers.append(CBlockHeader(all_blocks[0])) test_node.send_message(headers_message) test_node.send_and_ping(msg_block(all_blocks[1])) self.nodes[0].getblock(all_blocks[1].hash) # Now send the blocks in all_blocks for i in range(288): test_node.send_message(msg_block(all_blocks[i])) test_node.sync_with_ping() # Blocks 1-287 should be accepted, block 288 should be ignored because # it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_rpc_error( - -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) + -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash + ) # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). # The node should have requested the blocks at some point, so # disconnect/reconnect first self.nodes[0].disconnect_p2ps() self.nodes[1].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) test_node.send_and_ping(msg_block(block_h1f)) assert_equal(self.nodes[0].getblockcount(), 2) self.log.info( - "Unrequested block that would complete more-work chain was ignored") + "Unrequested block that would complete more-work chain was ignored" + ) # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with p2p_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)])) test_node.sync_with_ping() with p2p_lock: getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, block_h1f.sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_and_ping(msg_block(block_h1f)) assert_equal(self.nodes[0].getblockcount(), 290) self.nodes[0].getblock(all_blocks[286].hash) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) - assert_raises_rpc_error(-1, "Block not found on disk", - self.nodes[0].getblock, all_blocks[287].hash) + assert_raises_rpc_error( + -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash + ) self.log.info("Successfully reorged to longer chain") # 8. 
Create a chain which is invalid at a height longer than the # current chain, but which has more blocks on top of that block_289f = create_block( - all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime + 1) + all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime + 1 + ) block_289f.solve() block_290f = create_block( - block_289f.sha256, create_coinbase(290), block_289f.nTime + 1) + block_289f.sha256, create_coinbase(290), block_289f.nTime + 1 + ) block_290f.solve() block_291 = create_block( - block_290f.sha256, create_coinbase(291), block_290f.nTime + 1) + block_290f.sha256, create_coinbase(291), block_290f.nTime + 1 + ) # block_291 spends a coinbase below maturity! - block_291.vtx.append(create_tx_with_script( - block_290f.vtx[0], 0, script_sig=b"42", amount=1)) + block_291.vtx.append( + create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1) + ) block_291.hashMerkleRoot = block_291.calc_merkle_root() block_291.solve() block_292 = create_block( - block_291.sha256, create_coinbase(292), block_291.nTime + 1) + block_291.sha256, create_coinbase(292), block_291.nTime + 1 + ) block_292.solve() # Now send all the headers on the chain and enough blocks to trigger # reorg headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_289f)) headers_message.headers.append(CBlockHeader(block_290f)) headers_message.headers.append(CBlockHeader(block_291)) headers_message.headers.append(CBlockHeader(block_292)) test_node.send_and_ping(headers_message) tip_entry_found = False for x in self.nodes[0].getchaintips(): - if x['hash'] == block_292.hash: - assert_equal(x['status'], "headers-only") + if x["hash"] == block_292.hash: + assert_equal(x["status"], "headers-only") tip_entry_found = True assert tip_entry_found - assert_raises_rpc_error(-1, "Block not found on disk", - self.nodes[0].getblock, block_292.hash) + assert_raises_rpc_error( + -1, "Block not found on disk", self.nodes[0].getblock, block_292.hash + ) test_node.send_message(msg_block(block_289f)) test_node.send_and_ping(msg_block(block_290f)) self.nodes[0].getblock(block_289f.hash) self.nodes[0].getblock(block_290f.hash) test_node.send_message(msg_block(block_291)) # At this point we've sent an obviously-bogus block, wait for full processing # without assuming whether we will be disconnected or not try: # Only wait a short while so the test doesn't take forever if we do get # disconnected test_node.sync_with_ping(timeout=1) except AssertionError: test_node.wait_for_disconnect() self.nodes[0].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # We should have failed reorg and switched back to 290 (but have block # 291) assert_equal(self.nodes[0].getblockcount(), 290) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) - assert_equal(self.nodes[0].getblock( - block_291.hash)["confirmations"], -1) + assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1) # Now send a new header on the invalid chain, indicating we're forked # off, and expect to get disconnected block_293 = create_block( - block_292.sha256, create_coinbase(293), block_292.nTime + 1) + block_292.sha256, create_coinbase(293), block_292.nTime + 1 + ) block_293.solve() headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_293)) test_node.send_message(headers_message) test_node.wait_for_disconnect() # 9. 
Connect node1 to node0 and ensure it is able to sync self.connect_nodes(0, 1) self.sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Successfully synced nodes 1 and 0") -if __name__ == '__main__': +if __name__ == "__main__": AcceptBlockTest().main()
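For readers skimming this reformatting diff, the following standalone sketch (not part of the patch) condenses the header-announcement pattern the tests above exercise over and over: build a block with create_block/create_coinbase, announce it by header only, wait for the node's getdata, deliver the block, and check the tip. It assumes the same test_framework helpers that appear throughout the diff (create_block, create_coinbase, msg_headers, CBlockHeader, P2PInterface.wait_for_getdata, send_and_ping); the file and class names are hypothetical and the snippet is an illustrative sketch, not a definitive test from the tree.

#!/usr/bin/env python3
# Illustrative only -- NOT part of the patch above. A minimal sketch, assuming
# the same test_framework helpers used throughout this diff; the class name
# (HeadersAnnouncementSketch) is hypothetical.
"""Announce a single block via a headers message and check that the node
requests and accepts it -- the core pattern exercised by the tests above."""

from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CBlockHeader, msg_block, msg_headers
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class HeadersAnnouncementSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PInterface())

        # Mine one block so the node leaves IBD and will fetch announced blocks.
        self.generate(node, 1)

        tip = int(node.getbestblockhash(), 16)
        height = node.getblockcount() + 1
        block_time = node.getblock(node.getbestblockhash())["time"] + 1

        # Build and solve a block on top of the current tip.
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()

        # Announce it by header only; the node should respond with a getdata
        # for the block (direct fetch, as in Part 4 of p2p_sendheaders above).
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block))
        peer.send_message(headers_message)
        peer.wait_for_getdata([block.sha256])

        # Deliver the block and confirm the tip advanced to it.
        peer.send_and_ping(msg_block(block))
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)


if __name__ == "__main__":
    HeadersAnnouncementSketch().main()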