diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py
index 1d6d32b53..14da369d3 100755
--- a/test/functional/p2p_filter.py
+++ b/test/functional/p2p_filter.py
@@ -1,281 +1,276 @@
 #!/usr/bin/env python3
 # Copyright (c) 2020 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """
 Test BIP 37
 """
 
 from test_framework.messages import (
     CInv,
     MAX_BLOOM_FILTER_SIZE,
     MAX_BLOOM_HASH_FUNCS,
     MSG_BLOCK,
     MSG_FILTERED_BLOCK,
     msg_filteradd,
     msg_filterclear,
     msg_filterload,
     msg_getdata,
     msg_mempool,
     msg_version,
 )
 from test_framework.p2p import P2PInterface, p2p_lock
 from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE
 from test_framework.test_framework import BitcoinTestFramework
 
 
 class P2PBloomFilter(P2PInterface):
     # This is a P2SH watch-only wallet
     watch_script_pubkey = 'a914ffffffffffffffffffffffffffffffffffffffff87'
     # The initial filter (n=10, fp=0.000001) with just the above scriptPubKey
     # added
     watch_filter_init = msg_filterload(
         data=b'@\x00\x08\x00\x80\x00\x00 \x00\xc0\x00 \x04\x00\x08$\x00\x04\x80\x00\x00 \x00\x00\x00\x00\x80\x00\x00@\x00\x02@ \x00',
         nHashFuncs=19,
         nTweak=0,
         nFlags=1,
     )
 
     def __init__(self):
         super().__init__()
         self._tx_received = False
         self._merkleblock_received = False
 
     def on_inv(self, message):
         want = msg_getdata()
         for i in message.inv:
             # inv messages can only contain TX or BLOCK, so translate BLOCK to
             # FILTERED_BLOCK
             if i.type == MSG_BLOCK:
                 want.inv.append(CInv(MSG_FILTERED_BLOCK, i.hash))
             else:
                 want.inv.append(i)
         if len(want.inv):
             self.send_message(want)
 
     def on_merkleblock(self, message):
         self._merkleblock_received = True
 
     def on_tx(self, message):
         self._tx_received = True
 
     @property
     def tx_received(self):
         with p2p_lock:
             return self._tx_received
 
     @tx_received.setter
     def tx_received(self, value):
         with p2p_lock:
             self._tx_received = value
 
     @property
     def merkleblock_received(self):
         with p2p_lock:
             return self._merkleblock_received
 
     @merkleblock_received.setter
     def merkleblock_received(self, value):
         with p2p_lock:
             self._merkleblock_received = value
 
 
 class FilterTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = False
         self.num_nodes = 1
         self.extra_args = [[
             '-peerbloomfilters',
             '-whitelist=noban@127.0.0.1',  # immediate tx relay
         ]]
 
     def skip_test_if_missing_module(self):
         self.skip_if_no_wallet()
 
     def test_size_limits(self, filter_peer):
         self.log.info('Check that too large filter is rejected')
         with self.nodes[0].assert_debug_log(['Misbehaving']):
             filter_peer.send_and_ping(msg_filterload(
                 data=b'\xbb' * (MAX_BLOOM_FILTER_SIZE + 1)))
 
         self.log.info('Check that max size filter is accepted')
         with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']):
             filter_peer.send_and_ping(
                 msg_filterload(
                     data=b'\xbb' *
                     (MAX_BLOOM_FILTER_SIZE)))
         filter_peer.send_and_ping(msg_filterclear())
 
         self.log.info(
             'Check that filter with too many hash functions is rejected')
         with self.nodes[0].assert_debug_log(['Misbehaving']):
             filter_peer.send_and_ping(
                 msg_filterload(
                     data=b'\xaa',
                     nHashFuncs=MAX_BLOOM_HASH_FUNCS + 1))
 
         self.log.info('Check that filter with max hash functions is accepted')
         with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']):
             filter_peer.send_and_ping(
                 msg_filterload(
                     data=b'\xaa',
                     nHashFuncs=MAX_BLOOM_HASH_FUNCS))
         # Don't send filterclear until next two filteradd checks are done
 
         self.log.info(
             'Check that max size data element to add to the filter is accepted')
         with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']):
             filter_peer.send_and_ping(
                 msg_filteradd(
                     data=b'\xcc' *
                     (MAX_SCRIPT_ELEMENT_SIZE)))
 
         self.log.info(
             'Check that too large data element to add to the filter is rejected')
         with self.nodes[0].assert_debug_log(['Misbehaving']):
             filter_peer.send_and_ping(msg_filteradd(
                 data=b'\xcc' * (MAX_SCRIPT_ELEMENT_SIZE + 1)))
 
         filter_peer.send_and_ping(msg_filterclear())
 
     def test_msg_mempool(self):
         self.log.info(
             "Check that a node with bloom filters enabled services p2p mempool messages")
         filter_peer = P2PBloomFilter()
 
         self.log.debug("Create a tx relevant to the peer before connecting")
         filter_address = self.nodes[0].decodescript(
             filter_peer.watch_script_pubkey)['addresses'][0]
         txid = self.nodes[0].sendtoaddress(filter_address, 90)
 
         self.log.debug(
             "Send a mempool msg after connecting and check that the tx is received")
         self.nodes[0].add_p2p_connection(filter_peer)
         filter_peer.send_and_ping(filter_peer.watch_filter_init)
         self.nodes[0].p2p.send_message(msg_mempool())
         filter_peer.wait_for_tx(txid)
 
     def test_frelay_false(self, filter_peer):
         self.log.info(
             "Check that a node with fRelay set to false does not receive invs until the filter is set")
         filter_peer.tx_received = False
         filter_address = self.nodes[0].decodescript(
             filter_peer.watch_script_pubkey)['addresses'][0]
         self.nodes[0].sendtoaddress(filter_address, 90)
         # Sync to make sure the reason filter_peer doesn't receive the tx is
         # not p2p delays
         filter_peer.sync_with_ping()
         assert not filter_peer.tx_received
 
         # Clear the mempool so that this transaction does not impact subsequent
         # tests
         self.nodes[0].generate(1)
 
     def test_filter(self, filter_peer):
         # Set the bloomfilter using filterload
         filter_peer.send_and_ping(filter_peer.watch_filter_init)
         # If fRelay is not already True, sending filterload sets it to True
         assert self.nodes[0].getpeerinfo()[0]['relaytxes']
         filter_address = self.nodes[0].decodescript(
             filter_peer.watch_script_pubkey)['addresses'][0]
 
         self.log.info(
             'Check that we receive merkleblock and tx if the filter matches a tx in a block')
         block_hash = self.nodes[0].generatetoaddress(1, filter_address)[0]
         txid = self.nodes[0].getblock(block_hash)['tx'][0]
         filter_peer.wait_for_merkleblock(block_hash)
         filter_peer.wait_for_tx(txid)
 
         self.log.info(
             'Check that we only receive a merkleblock if the filter does not match a tx in a block')
         filter_peer.tx_received = False
         block_hash = self.nodes[0].generatetoaddress(
             1, self.nodes[0].getnewaddress())[0]
         filter_peer.wait_for_merkleblock(block_hash)
         assert not filter_peer.tx_received
 
         self.log.info(
             'Check that we not receive a tx if the filter does not match a mempool tx')
         filter_peer.merkleblock_received = False
         filter_peer.tx_received = False
         self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 90)
         filter_peer.sync_with_ping()
         filter_peer.sync_with_ping()
         assert not filter_peer.merkleblock_received
         assert not filter_peer.tx_received
 
         self.log.info(
             'Check that we receive a tx if the filter matches a mempool tx')
         filter_peer.merkleblock_received = False
         txid = self.nodes[0].sendtoaddress(filter_address, 90)
         filter_peer.wait_for_tx(txid)
         assert not filter_peer.merkleblock_received
 
         self.log.info(
             'Check that after deleting filter all txs get relayed again')
         filter_peer.send_and_ping(msg_filterclear())
 
         for _ in range(5):
             txid = self.nodes[0].sendtoaddress(
                 self.nodes[0].getnewaddress(), 7)
             filter_peer.wait_for_tx(txid)
 
         self.log.info(
             'Check that request for filtered blocks is ignored if no filter'
             ' is set')
         filter_peer.merkleblock_received = False
         filter_peer.tx_received = False
         with self.nodes[0].assert_debug_log(expected_msgs=['received getdata']):
             block_hash = self.nodes[0].generatetoaddress(
                 1, self.nodes[0].getnewaddress())[0]
             filter_peer.wait_for_inv([CInv(MSG_BLOCK, int(block_hash, 16))])
             filter_peer.sync_with_ping()
             assert not filter_peer.merkleblock_received
             assert not filter_peer.tx_received
 
         self.log.info(
             'Check that sending "filteradd" if no filter is set is treated as '
             'misbehavior')
         with self.nodes[0].assert_debug_log(['Misbehaving']):
             filter_peer.send_and_ping(msg_filteradd(data=b'letsmisbehave'))
 
         self.log.info(
             "Check that division-by-zero remote crash bug [CVE-2013-5700] is fixed")
         filter_peer.send_and_ping(msg_filterload(data=b'', nHashFuncs=1))
         filter_peer.send_and_ping(
             msg_filteradd(
                 data=b'letstrytocrashthisnode'))
         self.nodes[0].disconnect_p2ps()
 
     def run_test(self):
         filter_peer = self.nodes[0].add_p2p_connection(P2PBloomFilter())
         self.log.info('Test filter size limits')
         self.test_size_limits(filter_peer)
 
         self.log.info('Test BIP 37 for a node with fRelay = True (default)')
         self.test_filter(filter_peer)
         self.nodes[0].disconnect_p2ps()
 
         self.log.info('Test BIP 37 for a node with fRelay = False')
         # Add peer but do not send version yet
         filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(
             P2PBloomFilter(), send_version=False, wait_for_verack=False)
         # Send version with fRelay=False
-        filter_peer_without_nrelay.wait_until(
-            lambda: filter_peer_without_nrelay.is_connected,
-            timeout=10,
-            check_connected=False,
-        )
         version_without_fRelay = msg_version()
         version_without_fRelay.nRelay = 0
         filter_peer_without_nrelay.send_message(version_without_fRelay)
         filter_peer_without_nrelay.wait_for_verack()
         assert not self.nodes[0].getpeerinfo()[0]['relaytxes']
         self.test_frelay_false(filter_peer_without_nrelay)
         self.test_filter(filter_peer_without_nrelay)
 
         self.test_msg_mempool()
 
 
 if __name__ == '__main__':
     FilterTest().main()
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
index e24fd6009..ec5a65e84 100755
--- a/test/functional/p2p_leak.py
+++ b/test/functional/p2p_leak.py
@@ -1,204 +1,205 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test message sending before handshake completion.
 
 A node should never send anything other than VERSION/VERACK until it's
 received a VERACK.
 
 This test connects to a node and sends it a few messages, trying to entice it
 into sending us something it shouldn't.
 """
 
 import time
 
 from test_framework.messages import (
     msg_getaddr,
     msg_ping,
     msg_verack,
     msg_version,
 )
 from test_framework.p2p import (
     p2p_lock,
     P2PInterface,
 )
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal,
     assert_greater_than_or_equal,
     wait_until,
 )
 
 DISCOURAGEMENT_THRESHOLD = 10
 
 
 class CLazyNode(P2PInterface):
     def __init__(self):
         super().__init__()
         self.unexpected_msg = False
         self.ever_connected = False
 
     def bad_message(self, message):
         self.unexpected_msg = True
         self.log.info(
             "should not have received message: {}".format(message.msgtype))
 
     def on_open(self):
         self.ever_connected = True
 
     def on_version(self, message): self.bad_message(message)
 
     def on_verack(self, message): self.bad_message(message)
 
     def on_inv(self, message): self.bad_message(message)
 
     def on_addr(self, message): self.bad_message(message)
 
     def on_getdata(self, message): self.bad_message(message)
 
     def on_getblocks(self, message): self.bad_message(message)
 
     def on_tx(self, message): self.bad_message(message)
 
     def on_block(self, message): self.bad_message(message)
 
     def on_getaddr(self, message): self.bad_message(message)
 
     def on_headers(self, message): self.bad_message(message)
 
     def on_getheaders(self, message): self.bad_message(message)
 
     def on_ping(self, message): self.bad_message(message)
 
     def on_mempool(self, message): self.bad_message(message)
 
     def on_pong(self, message): self.bad_message(message)
 
     def on_feefilter(self, message): self.bad_message(message)
 
     def on_sendheaders(self, message): self.bad_message(message)
 
     def on_sendcmpct(self, message): self.bad_message(message)
 
     def on_cmpctblock(self, message): self.bad_message(message)
 
     def on_getblocktxn(self, message): self.bad_message(message)
 
     def on_blocktxn(self, message): self.bad_message(message)
 
+
 # Node that never sends a version. We'll use this to send a bunch of messages
 # anyway, and eventually get disconnected.
 
 
 class CNodeNoVersionMisbehavior(CLazyNode):
-    # Send enough veracks without a message to reach the peer discouragement
-    # threshold. This should get us disconnected. NOTE: implementation-specific
-    # test; update if our discouragement policy for peer misbehavior changes.
-    def on_open(self):
-        super().on_open()
-        for _ in range(DISCOURAGEMENT_THRESHOLD):
-            self.send_message(msg_verack())
+    pass
+
 
 # Node that never sends a version. This one just sits idle and hopes to receive
 # any message (it shouldn't!)
 
 
 class CNodeNoVersionIdle(CLazyNode):
     def __init__(self):
         super().__init__()
 
+
 # Node that sends a version but not a verack.
 
 
 class CNodeNoVerackIdle(CLazyNode):
     def __init__(self):
         self.version_received = False
         super().__init__()
 
     def on_verack(self, message): pass
     # When version is received, don't reply with a verack. Instead, see if the
     # node will give us a message that it shouldn't. This is not an exhaustive
     # list!
 
     def on_version(self, message):
         self.version_received = True
         self.send_message(msg_ping())
         self.send_message(msg_getaddr())
 
 
 class P2PVersionStore(P2PInterface):
     version_received = None
 
     def on_version(self, msg):
         super().on_version(msg)
         self.version_received = msg
 
 
 class P2PLeakTest(BitcoinTestFramework):
     def set_test_params(self):
         self.num_nodes = 1
 
     def run_test(self):
         no_version_disconnect_node = self.nodes[0].add_p2p_connection(
             CNodeNoVersionMisbehavior(), send_version=False, wait_for_verack=False)
         no_version_idlenode = self.nodes[0].add_p2p_connection(
             CNodeNoVersionIdle(), send_version=False, wait_for_verack=False)
         no_verack_idlenode = self.nodes[0].add_p2p_connection(
             CNodeNoVerackIdle(), wait_for_verack=False)
 
+        # Send enough veracks without a message to reach the peer discouragement
+        # threshold. This should get us disconnected.
+        for _ in range(DISCOURAGEMENT_THRESHOLD):
+            no_version_disconnect_node.send_message(msg_verack())
+
         # Wait until we got the verack in response to the version. Though, don't wait for the other node to receive the
         # verack, since we never sent one
         no_verack_idlenode.wait_for_verack()
 
         wait_until(
             lambda: no_version_disconnect_node.ever_connected,
             timeout=10,
             lock=p2p_lock)
         wait_until(lambda: no_version_idlenode.ever_connected,
                    timeout=10, lock=p2p_lock)
         wait_until(lambda: no_verack_idlenode.version_received,
                    timeout=10, lock=p2p_lock)
 
         # Mine a block and make sure that it's not sent to the connected nodes
         self.nodes[0].generatetoaddress(
             1, self.nodes[0].get_deterministic_priv_key().address)
 
         # Give the node enough time to possibly leak out a message
         time.sleep(5)
 
         # Expect this node to be disconnected for misbehavior
         assert not no_version_disconnect_node.is_connected
 
         self.nodes[0].disconnect_p2ps()
 
         # Make sure no unexpected messages came in
         assert not no_version_disconnect_node.unexpected_msg
         assert not no_version_idlenode.unexpected_msg
         assert not no_verack_idlenode.unexpected_msg
 
         self.log.info(
             'Check that the version message does not leak the local address of the node')
         p2p_version_store = self.nodes[0].add_p2p_connection(P2PVersionStore())
         ver = p2p_version_store.version_received
         # Check that received time is within one hour of now
         assert_greater_than_or_equal(ver.nTime, time.time() - 3600)
         assert_greater_than_or_equal(time.time() + 3600, ver.nTime)
         assert_equal(ver.addrFrom.port, 0)
         assert_equal(ver.addrFrom.ip, '0.0.0.0')
         assert_equal(ver.nStartingHeight, 201)
         assert_equal(ver.nRelay, 1)
 
         self.log.info('Check that old nodes are disconnected')
         p2p_old_node = self.nodes[0].add_p2p_connection(
             P2PInterface(), send_version=False, wait_for_verack=False)
         old_version_msg = msg_version()
         old_version_msg.nVersion = 31799
-        wait_until(lambda: p2p_old_node.is_connected)
         with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
             p2p_old_node.send_message(old_version_msg)
             p2p_old_node.wait_for_disconnect()
 
 
 if __name__ == '__main__':
     P2PLeakTest().main()
diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index 74be3ece6..bf1d800fe 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -1,800 +1,800 @@
 #!/usr/bin/env python3
 # Copyright (c) 2010 ArtForz -- public domain half-a-node
 # Copyright (c) 2012 Jeff Garzik
 # Copyright (c) 2010-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test objects for interacting with a bitcoind node over the p2p protocol.
 
 The P2PInterface objects interact with the bitcoind nodes under test using the
 node's p2p interface. They can be used to send messages to the node, and
 callbacks can be registered that execute when messages are received from the
 node. Messages are sent to/received from the node on an asyncio event loop.
 State held inside the objects must be guarded by the p2p_lock to avoid data
 races between the main testing thread and the event loop.
 
 P2PConnection: A low-level connection object to a node's P2P interface
 P2PInterface: A high-level interface object for communicating to a node over P2P
 P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
               and can respond correctly to getdata and getheaders messages
 P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps
               a count of how many times each txid has been announced."""
 
 import asyncio
 from collections import defaultdict
 from io import BytesIO
 import logging
 import struct
 import sys
 import threading
 
 from test_framework.messages import (
     CBlockHeader,
     MAX_HEADERS_RESULTS,
     MIN_VERSION_SUPPORTED,
     msg_addr,
     msg_addrv2,
     msg_avapoll,
     msg_avaproof,
     msg_tcpavaresponse,
     msg_avahello,
     msg_block,
     MSG_BLOCK,
     msg_blocktxn,
     msg_cfcheckpt,
     msg_cfheaders,
     msg_cfilter,
     msg_cmpctblock,
     msg_feefilter,
     msg_filteradd,
     msg_filterclear,
     msg_filterload,
     msg_getaddr,
     msg_getblocks,
     msg_getblocktxn,
     msg_getdata,
     msg_getheaders,
     msg_headers,
     msg_inv,
     msg_mempool,
     msg_merkleblock,
     msg_notfound,
     msg_ping,
     msg_pong,
     msg_sendaddrv2,
     msg_sendcmpct,
     msg_sendheaders,
     msg_tx,
     MSG_TX,
     MSG_TYPE_MASK,
     msg_verack,
     msg_version,
     NODE_NETWORK,
     sha256,
 )
 from test_framework.util import wait_until
 
 logger = logging.getLogger("TestFramework.p2p")
 
 MESSAGEMAP = {
     b"addr": msg_addr,
     b"addrv2": msg_addrv2,
     b"avapoll": msg_avapoll,
     b"avaproof": msg_avaproof,
     b"avaresponse": msg_tcpavaresponse,
     b"avahello": msg_avahello,
     b"block": msg_block,
     b"blocktxn": msg_blocktxn,
     b"cfcheckpt": msg_cfcheckpt,
     b"cfheaders": msg_cfheaders,
     b"cfilter": msg_cfilter,
     b"cmpctblock": msg_cmpctblock,
     b"feefilter": msg_feefilter,
     b"filteradd": msg_filteradd,
     b"filterclear": msg_filterclear,
     b"filterload": msg_filterload,
     b"getaddr": msg_getaddr,
     b"getblocks": msg_getblocks,
     b"getblocktxn": msg_getblocktxn,
     b"getdata": msg_getdata,
     b"getheaders": msg_getheaders,
     b"headers": msg_headers,
     b"inv": msg_inv,
     b"mempool": msg_mempool,
     b"merkleblock": msg_merkleblock,
     b"notfound": msg_notfound,
     b"ping": msg_ping,
     b"pong": msg_pong,
     b"sendaddrv2": msg_sendaddrv2,
     b"sendcmpct": msg_sendcmpct,
     b"sendheaders": msg_sendheaders,
     b"tx": msg_tx,
     b"verack": msg_verack,
     b"version": msg_version,
 }
 
 MAGIC_BYTES = {
     "mainnet": b"\xe3\xe1\xf3\xe8",
     "testnet3": b"\xf4\xe5\xf3\xf4",
     "regtest": b"\xda\xb5\xbf\xfa",
 }
 
 
 class P2PConnection(asyncio.Protocol):
     """A low-level connection object to a node's P2P interface.
 
     This class is responsible for:
 
     - opening and closing the TCP connection to the node
     - reading bytes from and writing bytes to the socket
     - deserializing and serializing the P2P message header
     - logging messages as they are sent and received
 
     This class contains no logic for handing the P2P message payloads. It must be
     sub-classed and the on_message() callback overridden."""
 
     def __init__(self):
         # The underlying transport of the connection.
         # Should only call methods on this from the NetworkThread, c.f.
         # call_soon_threadsafe
         self._transport = None
 
     @property
     def is_connected(self):
         return self._transport is not None
 
     def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
         assert not self.is_connected
         self.timeout_factor = timeout_factor
         self.dstaddr = dstaddr
         self.dstport = dstport
         # The initial message to send after the connection was made:
         self.on_connection_send_msg = None
         self.on_connection_send_msg_is_raw = False
         self.recvbuf = b""
         self.magic_bytes = MAGIC_BYTES[net]
         logger.debug('Connecting to Bitcoin Node: {}:{}'.format(
             self.dstaddr, self.dstport))
 
         loop = NetworkThread.network_event_loop
         conn_gen_unsafe = loop.create_connection(
             lambda: self, host=self.dstaddr, port=self.dstport)
 
         def conn_gen(): return loop.call_soon_threadsafe(
             loop.create_task, conn_gen_unsafe)
         return conn_gen
 
     def peer_disconnect(self):
         # Connection could have already been closed by other end.
         NetworkThread.network_event_loop.call_soon_threadsafe(
             lambda: self._transport and self._transport.abort())
 
     # Connection and disconnection methods
 
     def connection_made(self, transport):
         """asyncio callback when a connection is opened."""
         assert not self._transport
         logger.debug("Connected & Listening: {}:{}".format(
             self.dstaddr, self.dstport))
         self._transport = transport
         if self.on_connection_send_msg:
             if self.on_connection_send_msg_is_raw:
                 self.send_raw_message(self.on_connection_send_msg)
             else:
                 self.send_message(self.on_connection_send_msg)
             # Never used again
             self.on_connection_send_msg = None
         self.on_open()
 
     def connection_lost(self, exc):
         """asyncio callback when a connection is closed."""
         if exc:
             logger.warning("Connection lost to {}:{} due to {}".format(
                 self.dstaddr, self.dstport, exc))
         else:
             logger.debug("Closed connection to: {}:{}".format(
                 self.dstaddr, self.dstport))
         self._transport = None
         self.recvbuf = b""
         self.on_close()
 
     # Socket read methods
 
     def data_received(self, t):
         """asyncio callback when data is read from the socket."""
         with p2p_lock:
             if len(t) > 0:
                 self.recvbuf += t
 
         while True:
             msg = self._on_data()
             if msg is None:
                 break
             self.on_message(msg)
 
     def _on_data(self):
         """Try to read P2P messages from the recv buffer.
 
         This method reads data from the buffer in a loop. It deserializes,
         parses and verifies the P2P header, then passes the P2P payload to
         the on_message callback for processing."""
         try:
             with p2p_lock:
                 if len(self.recvbuf) < 4:
                     return None
                 if self.recvbuf[:4] != self.magic_bytes:
                     raise ValueError(
                         "magic bytes mismatch: {} != {}".format(
                             repr(
                                 self.magic_bytes), repr(
                                 self.recvbuf)))
                 if len(self.recvbuf) < 4 + 12 + 4 + 4:
                     return None
                 msgtype = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0]
                 msglen = struct.unpack(
                     "<i", self.recvbuf[4 + 12:4 + 12 + 4])[0]
                 checksum = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + 4]
                 if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                     return None
                 msg = self.recvbuf[4 + 12 + 4 + 4:4 + 12 + 4 + 4 + msglen]
                 h = sha256(sha256(msg))
                 if checksum != h[:4]:
                     raise ValueError("got bad checksum " + repr(self.recvbuf))
                 self.recvbuf = self.recvbuf[4 + 12 + 4 + 4 + msglen:]
                 if msgtype not in MESSAGEMAP:
                     raise ValueError("Received unknown msgtype from {}:{}: '{}' {}".format(
                         self.dstaddr, self.dstport, msgtype, repr(msg)))
                 f = BytesIO(msg)
                 m = MESSAGEMAP[msgtype]()
                 m.deserialize(f)
                 self._log_message("receive", m)
                 return m
         except Exception as e:
             logger.exception('Error reading message:', repr(e))
             raise
 
     def on_message(self, message):
         """Callback for processing a P2P payload. Must be overridden by derived class."""
         raise NotImplementedError
 
     # Socket write methods
 
     def send_message(self, message):
         """Send a P2P message over the socket.
 
         This method takes a P2P payload, builds the P2P header and adds
         the message to the send buffer to be sent over the socket."""
         if not self.is_connected:
             raise IOError('Not connected')
         tmsg = self.build_message(message)
         self._log_message("send", message)
         return self.send_raw_message(tmsg)
 
     def send_raw_message(self, raw_message_bytes):
         """Send any raw message over the socket.
 
         This method adds a raw message to the send buffer to be sent over the
         socket."""
         if not self.is_connected:
             raise IOError('Not connected')
 
         def maybe_write():
             if not self._transport:
                 return
             if self._transport.is_closing():
                 return
             self._transport.write(raw_message_bytes)
         NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
 
     # Class utility methods
 
     def build_message(self, message):
         """Build a serialized P2P message"""
         msgtype = message.msgtype
         data = message.serialize()
         tmsg = self.magic_bytes
         tmsg += msgtype
         tmsg += b"\x00" * (12 - len(msgtype))
         tmsg += struct.pack("<I", len(data))
         th = sha256(data)
         h = sha256(th)
         tmsg += h[:4]
         tmsg += data
         return tmsg
 
     def _log_message(self, direction, msg):
         """Logs a message being sent or received over the connection."""
         if direction == "send":
             log_message = "Send message to "
         elif direction == "receive":
             log_message = "Received message from "
         log_message += "{}:{}: {}".format(
             self.dstaddr, self.dstport, repr(msg)[:500])
         if len(log_message) > 500:
             log_message += "... (msg truncated)"
         logger.debug(log_message)
 
 
 class P2PInterface(P2PConnection):
     """A high-level P2P interface class for communicating with a Bitcoin Cash node.
 
     This class provides high-level callbacks for processing P2P message
     payloads, as well as convenience methods for interacting with the
     node over P2P.
 
     Individual testcases should subclass this and override the on_* methods
     if they want to alter message handling behaviour."""
 
     def __init__(self, support_addrv2=False):
         super().__init__()
 
         # Track number of messages of each type received.
         # Should be read-only in a test.
         self.message_count = defaultdict(int)
 
         # Track the most recent message of each type.
         # To wait for a message to be received, pop that message from
         # this and use wait_until.
         self.last_message = {}
 
         # A count of the number of ping messages we've sent to the node
         self.ping_counter = 1
 
         # The network services received from the peer
         self.nServices = 0
 
         self.support_addrv2 = support_addrv2
 
     def peer_connect(self, *args, services=NODE_NETWORK,
                      send_version=True, **kwargs):
         """Create the connection and optionally queue an initial version message.

         services: service bits to advertise in the version message.
         send_version: when True, a version message is queued and sent as soon
         as the connection is established.

         Returns whatever P2PConnection.peer_connect() returns (the connection
         coroutine to be scheduled on the network event loop)."""
         create_conn = super().peer_connect(*args, **kwargs)

         if send_version:
             # Send a version msg
             vt = msg_version()
             vt.nServices = services
             # Advertise the address we are connecting to back to the peer.
             vt.addrTo.ip = self.dstaddr
             vt.addrTo.port = self.dstport
             vt.addrFrom.ip = "0.0.0.0"
             vt.addrFrom.port = 0

             # Will be sent soon after connection_made
             self.on_connection_send_msg = vt

         return create_conn
 
     # Message receiving methods
 
     def on_message(self, message):
         """Receive message and dispatch message to appropriate callback.

         We keep a count of how many of each message type has been received
         and the most recent message of each type."""
         with p2p_lock:
             try:
                 msgtype = message.msgtype.decode('ascii')
                 self.message_count[msgtype] += 1
                 self.last_message[msgtype] = message
                 # Dispatch to the on_<msgtype> callback; subclasses override
                 # these to customize behaviour.
                 getattr(self, 'on_' + msgtype)(message)
             except Exception:
                 # Surface the failure, then re-raise so the test fails.
                 print("ERROR delivering {} ({})".format(
                     repr(message), sys.exc_info()[0]))
                 raise
 
     # Callback methods. Can be overridden by subclasses in individual test
     # cases to provide custom message handling behaviour.
     # The default for most message types is a no-op; note that on_message
     # still records every message in message_count and last_message, so
     # tests can wait on any of these without overriding the callback.

     def on_open(self):
         pass

     def on_close(self):
         pass

     def on_addr(self, message): pass

     def on_addrv2(self, message): pass

     def on_avapoll(self, message): pass

     def on_avaproof(self, message): pass

     def on_avaresponse(self, message): pass

     def on_avahello(self, message): pass

     def on_block(self, message): pass

     def on_blocktxn(self, message): pass

     def on_cfcheckpt(self, message): pass

     def on_cfheaders(self, message): pass

     def on_cfilter(self, message): pass

     def on_cmpctblock(self, message): pass

     def on_feefilter(self, message): pass

     def on_filteradd(self, message): pass

     def on_filterclear(self, message): pass

     def on_filterload(self, message): pass

     def on_getaddr(self, message): pass

     def on_getblocks(self, message): pass

     def on_getblocktxn(self, message): pass

     def on_getdata(self, message): pass

     def on_getheaders(self, message): pass

     def on_headers(self, message): pass

     def on_mempool(self, message): pass

     def on_merkleblock(self, message): pass

     def on_notfound(self, message): pass

     def on_pong(self, message): pass

     def on_sendaddrv2(self, message): pass

     def on_sendcmpct(self, message): pass

     def on_sendheaders(self, message): pass

     def on_tx(self, message): pass
 
     def on_inv(self, message):
         """Request every announced object with a getdata message.

         Inventory entries of type 0 (error) are skipped."""
         request = msg_getdata()
         request.inv = [entry for entry in message.inv if entry.type != 0]
         if request.inv:
             self.send_message(request)
 
     def on_ping(self, message):
         # Echo the nonce back so the node's ping round-trip completes.
         self.send_message(msg_pong(message.nonce))
 
     def on_verack(self, message):
         # No action needed here; tests block on wait_for_verack(), which
         # checks last_message (populated by on_message).
         pass
 
     def on_version(self, message):
         """Complete the handshake: reply with verack (and sendaddrv2 if
         enabled) and record the peer's advertised service bits."""
         assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(
             message.nVersion, MIN_VERSION_SUPPORTED)
         self.send_message(msg_verack())
         if self.support_addrv2:
             self.send_message(msg_sendaddrv2())
         self.nServices = message.nServices
 
     # Connection helper methods
 
     def wait_until(self, test_function_in, *, timeout=60,
                    check_connected=True):
         """Poll test_function_in (under p2p_lock) until it returns true.

         check_connected: when True, assert the connection is still up on
         every poll, so a disconnect fails fast instead of timing out."""
         def test_function():
             if check_connected:
                 assert self.is_connected
             return test_function_in()

         wait_until(test_function, timeout=timeout, lock=p2p_lock,
                    timeout_factor=self.timeout_factor)
 
     def wait_for_disconnect(self, timeout=60):
         """Block until the connection to the node has been dropped."""
         self.wait_until(lambda: not self.is_connected,
                         timeout=timeout, check_connected=False)
 
     # Message receiving helper methods
 
     def wait_for_tx(self, txid, timeout=60):
         """Block until a tx message with the given txid has been received."""
         def received_tx():
             last_tx = self.last_message.get('tx')
             return bool(last_tx) and last_tx.tx.rehash() == txid

         self.wait_until(received_tx, timeout=timeout)
 
     def wait_for_block(self, blockhash, timeout=60):
         """Block until a block message with the given hash has been received."""
         def received_block():
             last_block = self.last_message.get("block")
             if not last_block:
                 return False
             return last_block.block.rehash() == blockhash

         self.wait_until(received_block, timeout=timeout)
 
     def wait_for_header(self, blockhash, timeout=60):
         """Block until a headers message whose first header matches
         blockhash (a hex string) has been received."""
         expected = int(blockhash, 16)

         def received_header():
             headers_msg = self.last_message.get('headers')
             return bool(headers_msg) and headers_msg.headers[0].rehash() == expected

         self.wait_until(received_header, timeout=timeout)
 
     def wait_for_merkleblock(self, blockhash, timeout=60):
         """Block until a merkleblock message for blockhash (a hex string)
         has been received."""
         expected = int(blockhash, 16)

         def received_merkleblock():
             last_mb = self.last_message.get('merkleblock')
             return bool(last_mb) and last_mb.merkleblock.header.rehash() == expected

         self.wait_until(received_merkleblock, timeout=timeout)
 
     def wait_for_getdata(self, hash_list, timeout=60):
         """Waits for a getdata message.

         The object hashes in the inventory vector must match the provided hash_list."""
         def received_expected_getdata():
             getdata = self.last_message.get("getdata")
             if not getdata:
                 return False
             return hash_list == [entry.hash for entry in getdata.inv]

         self.wait_until(received_expected_getdata, timeout=timeout)
 
     def wait_for_getheaders(self, timeout=60):
         """Waits for a getheaders message.

         Receiving any getheaders message will satisfy the predicate. The
         last_message["getheaders"] value must be explicitly cleared before
         calling this method, or this will return immediately with success.
         TODO: change this method to take a hash value and only return true
         if the correct block header has been requested."""
         def test_function():
             return self.last_message.get("getheaders")

         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_inv(self, expected_inv, timeout=60):
         """Waits for an INV message and checks that the first inv object in the message was as expected."""
         if len(expected_inv) > 1:
             raise NotImplementedError(
                 "wait_for_inv() will only verify the first inv object")

         def received_matching_inv():
             last_inv = self.last_message.get("inv")
             if not last_inv:
                 return False
             first = last_inv.inv[0]
             return (first.type == expected_inv[0].type
                     and first.hash == expected_inv[0].hash)

         self.wait_until(received_matching_inv, timeout=timeout)
 
     def wait_for_verack(self, timeout=60):
         """Block until a verack message has been received from the peer."""
         def test_function():
             return "verack" in self.last_message

-        self.wait_until(test_function, timeout=timeout, check_connected=False)
+        self.wait_until(test_function, timeout=timeout)
 
     # Message sending helper functions
 
     def send_and_ping(self, message, timeout=60):
         """Send message, then ping and wait for the pong so the node has
         processed the message before we return."""
         self.send_message(message)
         self.sync_with_ping(timeout=timeout)
 
     # Sync up with the node
     def sync_with_ping(self, timeout=60):
         """Send a ping with a fresh nonce and wait for the matching pong."""
         self.send_message(msg_ping(nonce=self.ping_counter))

         def test_function():
             return self.last_message.get(
                 "pong") and self.last_message["pong"].nonce == self.ping_counter

         self.wait_until(test_function, timeout=timeout)
         # Bump the nonce so the next sync cannot be satisfied by a stale pong.
         self.ping_counter += 1
 
 
 # One lock for synchronizing all data access between the networking thread (see
 # NetworkThread below) and the thread running the test logic.  For simplicity,
 # P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
 # This lock should be acquired in the thread running the test logic to synchronize
 # access to any data shared with the P2PInterface or P2PConnection.
 p2p_lock = threading.Lock()
 
 
 class NetworkThread(threading.Thread):
     """Thread running the single asyncio event loop that services the
     P2P connections."""

     # Class-level handle to the one event loop; only one NetworkThread
     # may exist at a time.
     network_event_loop = None

     def __init__(self):
         super().__init__(name="NetworkThread")
         # There is only one event loop and no more than one thread must be
         # created
         assert not self.network_event_loop

         NetworkThread.network_event_loop = asyncio.new_event_loop()

     def run(self):
         """Start the network thread."""
         self.network_event_loop.run_forever()

     def close(self, timeout=10):
         """Close the connections and network event loop."""
         # stop() must run on the loop's own thread, hence call_soon_threadsafe.
         self.network_event_loop.call_soon_threadsafe(
             self.network_event_loop.stop)
         wait_until(lambda: not self.network_event_loop.is_running(),
                    timeout=timeout)
         self.network_event_loop.close()
         self.join(timeout)
         # Safe to remove event loop.
         NetworkThread.network_event_loop = None
 
 
 class P2PDataStore(P2PInterface):
     """A P2P data store class.

     Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""

     def __init__(self):
         super().__init__()
         # store of blocks. key is block hash, value is a CBlock object
         self.block_store = {}
         # hash of the most recently added block; used as the tip in
         # on_getheaders
         self.last_block_hash = ''
         # store of txs. key is txid, value is a CTransaction object
         self.tx_store = {}
         # every hash we have been asked for via getdata, in order received
         self.getdata_requests = []

     def on_getdata(self, message):
         """Check for the tx/block in our stores and if found, reply with the
         corresponding tx or block message."""
         for inv in message.inv:
             self.getdata_requests.append(inv.hash)
             if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
                 self.send_message(msg_tx(self.tx_store[inv.hash]))
             elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
                 self.send_message(msg_block(self.block_store[inv.hash]))
             else:
                 logger.debug(
                     'getdata message type {} received.'.format(hex(inv.type)))

     def on_getheaders(self, message):
         """Search back through our block store for the locator, and reply with a headers message if found."""

         locator, hash_stop = message.locator, message.hashstop

         # Assume that the most recent block added is the tip
         if not self.block_store:
             return

         headers_list = [self.block_store[self.last_block_hash]]
         while headers_list[-1].sha256 not in locator.vHave:
             # Walk back through the block store, adding headers to headers_list
             # as we go.
             prev_block_hash = headers_list[-1].hashPrevBlock
             if prev_block_hash in self.block_store:
                 prev_block_header = CBlockHeader(
                     self.block_store[prev_block_hash])
                 headers_list.append(prev_block_header)
                 if prev_block_header.sha256 == hash_stop:
                     # if this is the hashstop header, stop here
                     break
             else:
                 logger.debug('block hash {} not found in block store'.format(
                     hex(prev_block_hash)))
                 break

         # Truncate the list if there are too many headers: keep the last
         # MAX_HEADERS_RESULTS entries, reversed so they are oldest-first.
         headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
         response = msg_headers(headers_list)

         # NOTE(review): msg_headers(...) never returns None, so this guard
         # appears to be dead code — confirm before removing.
         if response is not None:
             self.send_message(response)

     def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False,
                              reject_reason=None, expect_disconnect=False, timeout=60):
         """Send blocks to test node and test whether the tip advances.

          - add all blocks to our block_store
          - send a headers message for the final block
          - the on_getheaders handler will ensure that any getheaders are responded to
          - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
            ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
          - if success is True: assert that the node's tip advances to the most recent block
          - if success is False: assert that the node's tip doesn't advance
          - if reject_reason is set: assert that the correct reject message is logged"""

         with p2p_lock:
             for block in blocks:
                 self.block_store[block.sha256] = block
                 self.last_block_hash = block.sha256

         def test():
             if force_send:
                 for b in blocks:
                     self.send_message(msg_block(block=b))

             else:
                 self.send_message(
                     msg_headers([CBlockHeader(block) for block in blocks]))
                 # Wait for the node to request the final block; when failure
                 # with disconnect is expected, don't require the connection
                 # to stay up while waiting (check_connected=success).
                 self.wait_until(
                     lambda: blocks[-1].sha256 in self.getdata_requests,
                     timeout=timeout,
                     check_connected=success,
                 )

             if expect_disconnect:
                 self.wait_for_disconnect(timeout=timeout)
             else:
                 self.sync_with_ping(timeout=timeout)

             if success:
                 self.wait_until(lambda: node.getbestblockhash() ==
                                 blocks[-1].hash, timeout=timeout)
             else:
                 assert node.getbestblockhash() != blocks[-1].hash

         if reject_reason:
             with node.assert_debug_log(expected_msgs=[reject_reason]):
                 test()
         else:
             test()

     def send_txs_and_test(self, txs, node, *, success=True,
                           expect_disconnect=False, reject_reason=None):
         """Send txs to test node and test whether they're accepted to the mempool.

          - add all txs to our tx_store
          - send tx messages for all txs
          - if success is True/False: assert that the txs are/are not accepted to the mempool
          - if expect_disconnect is True: Skip the sync with ping
          - if reject_reason is set: assert that the correct reject message is logged."""

         with p2p_lock:
             for tx in txs:
                 self.tx_store[tx.sha256] = tx

         def test():
             for tx in txs:
                 self.send_message(msg_tx(tx))

             if expect_disconnect:
                 self.wait_for_disconnect()
             else:
                 self.sync_with_ping()

             raw_mempool = node.getrawmempool()
             if success:
                 # Check that all txs are now in the mempool
                 for tx in txs:
                     assert tx.hash in raw_mempool, "{} not found in mempool".format(
                         tx.hash)
             else:
                 # Check that none of the txs are now in the mempool
                 for tx in txs:
                     assert tx.hash not in raw_mempool, "{} tx found in mempool".format(
                         tx.hash)

         if reject_reason:
             with node.assert_debug_log(expected_msgs=[reject_reason]):
                 test()
         else:
             test()
 
 
 class P2PTxInvStore(P2PInterface):
     """A P2PInterface which stores a count of how many times each txid has been announced."""

     def __init__(self):
         super().__init__()
         self.tx_invs_received = defaultdict(int)

     def on_inv(self, message):
         # Send getdata in response.
         super().on_inv(message)
         # Store how many times invs have been received for each tx.
         for entry in message.inv:
             if entry.type != MSG_TX:
                 continue
             # save txid
             self.tx_invs_received[entry.hash] += 1

     def get_invs(self):
         """Return the txids announced so far (thread-safe snapshot)."""
         with p2p_lock:
             return list(self.tx_invs_received.keys())

     def wait_for_broadcast(self, txns, timeout=60):
         """Waits for the txns (list of txids) to complete initial broadcast.
         The mempool should mark unbroadcast=False for these transactions.
         """
         expected = {int(txid, 16) for txid in txns}
         # Wait until invs have been received (and getdatas sent) for each txid.
         self.wait_until(lambda: set(self.tx_invs_received.keys()) == expected,
                         timeout=timeout)
         # Flush messages and wait for the getdatas to be processed
         self.sync_with_ping()
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index ee05237f6..892db203a 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -1,911 +1,914 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Class for bitcoind node under test"""
 
 import contextlib
 import decimal
 from enum import Enum
 import errno
 import http.client
 import json
 import logging
 import os
 import re
 import subprocess
 import sys
 import tempfile
 import time
 import urllib.parse
 import collections
 import shlex
 
 from .authproxy import JSONRPCException
 from .descriptors import descsum_create
 from .messages import XEC, CTransaction, FromHex, MY_SUBVERSION
 from .util import (
     MAX_NODES,
     append_config,
     delete_cookie_file,
     get_auth_cookie,
     get_rpc_proxy,
     p2p_port,
     rpc_url,
     wait_until,
     EncodeDecimal,
 )
 
 BITCOIND_PROC_WAIT_TIMEOUT = 60
 
 
 class FailedToStartError(Exception):
     """Raised when a bitcoind node under test fails to start correctly."""
 
 
 class ErrorMatch(Enum):
     """Matching modes for expected error output (exact text, full regex,
     or substring regex)."""
     FULL_TEXT = 1
     FULL_REGEX = 2
     PARTIAL_REGEX = 3
 
 
 class TestNode():
     """A class for representing a bitcoind node under test.
 
     This class contains:
 
     - state about the node (whether it's running, etc)
     - a Python subprocess.Popen object representing the running process
     - an RPC connection to the node
     - one or more P2P connections to the node
 
     To make things easier for the test writer, any unrecognised messages will
     be dispatched to the RPC connection."""
 
     def __init__(self, i, datadir, *, chain, host, rpc_port, p2p_port, timewait, timeout_factor, bitcoind, bitcoin_cli,
                  coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, emulator=None, start_perf=False, use_valgrind=False):
         """
         Kwargs:
             start_perf (bool): If True, begin profiling the node with `perf` as soon as
                 the node starts.
         """

         self.index = i
         self.datadir = datadir
         self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
         self.stdout_dir = os.path.join(self.datadir, "stdout")
         self.stderr_dir = os.path.join(self.datadir, "stderr")
         self.chain = chain
         self.host = host
         self.rpc_port = rpc_port
         self.p2p_port = p2p_port
         self.name = "testnode-{}".format(i)
         self.rpc_timeout = timewait
         self.binary = bitcoind
         if not os.path.isfile(self.binary):
             raise FileNotFoundError(
                 "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOIND=<path/to/bitcoind> {}".format(self.binary, sys.argv[0]))
         self.coverage_dir = coverage_dir
         self.cwd = cwd
         if extra_conf is not None:
             append_config(datadir, extra_conf)
         # Most callers will just need to add extra args to the default list
         # below.
         # For those callers that need more flexibility, they can access the
         # default args using the provided facilities.
         # Note that common args are set in the config file (see
         # initialize_datadir)
         self.extra_args = extra_args
         # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
         # This means that starting a bitcoind using the temp dir to debug a failed test won't
         # spam debug.log.
         self.default_args = [
             "-datadir=" + self.datadir,
             "-logtimemicros",
             "-logthreadnames",
             "-debug",
             "-debugexclude=libevent",
             "-debugexclude=leveldb",
             "-uacomment=" + self.name,
             "-noprinttoconsole",
         ]

         if use_valgrind:
             default_suppressions_file = os.path.join(
                 os.path.dirname(os.path.realpath(__file__)),
                 "..", "..", "..", "contrib", "valgrind.supp")
             suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                           default_suppressions_file)
             # Wrap bitcoind in valgrind: valgrind becomes the binary, with
             # bitcoind and its args appended after the valgrind options.
             self.binary = "valgrind"
             self.bitcoind_args = [bitcoind] + self.default_args
             self.default_args = ["--suppressions={}".format(suppressions_file),
                                  "--gen-suppressions=all", "--exit-on-first-error=yes",
                                  "--error-exitcode=1", "--quiet"] + self.bitcoind_args

         if emulator is not None:
             if not os.path.isfile(emulator):
                 raise FileNotFoundError(
                     "Emulator '{}' could not be found.".format(emulator))
         self.emulator = emulator

         if use_cli and not os.path.isfile(bitcoin_cli):
             raise FileNotFoundError(
                 "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOINCLI=<path/to/bitcoin-cli> {}".format(bitcoin_cli, sys.argv[0]))
         self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator)
         self.use_cli = use_cli
         self.start_perf = start_perf

         # Runtime state; populated by start()/wait_for_rpc_connection().
         self.running = False
         self.process = None
         self.rpc_connected = False
         self.rpc = None
         self.url = None
         self.relay_fee_cache = None
         self.log = logging.getLogger('TestFramework.node{}'.format(i))
         # Whether to kill the node when this object goes away
         self.cleanup_on_exit = True
         # Cache perf subprocesses here by their data output filename.
         self.perf_subprocesses = {}
         self.p2ps = []
         self.timeout_factor = timeout_factor
 
     AddressKeyPair = collections.namedtuple(
         'AddressKeyPair', ['address', 'key'])
     PRIV_KEYS = [
         # address , privkey
         AddressKeyPair(
             'mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z',
             'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
         AddressKeyPair(
             'msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg',
             'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
         AddressKeyPair(
             'mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP',
             'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
         AddressKeyPair(
             'mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR',
             'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
         AddressKeyPair(
             'msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws',
             'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
         AddressKeyPair(
             'n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi',
             'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
         AddressKeyPair(
             'myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6',
             'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
         AddressKeyPair(
             'mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8',
             'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
         AddressKeyPair(
             'mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg',
             'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
         AddressKeyPair(
             'mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf',
             'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
         AddressKeyPair(
             'mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6',
             'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
         AddressKeyPair(
             'mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7',
             'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
     ]
 
     def get_deterministic_priv_key(self):
         """Return a deterministic priv key in base58, that only depends on the node's index"""
         priv_keys = self.PRIV_KEYS
         assert len(priv_keys) == MAX_NODES
         return priv_keys[self.index]
 
     def _node_msg(self, msg: str) -> str:
         """Return a modified msg that identifies this node by its index as a debugging aid."""
         return f"[node {self.index}] {msg}"
 
     def _raise_assertion_error(self, msg: str):
         """Raise an AssertionError with msg modified to identify this node."""
         node_msg = self._node_msg(msg)
         raise AssertionError(node_msg)
 
     def __del__(self):
         """Best-effort cleanup of a leftover bitcoind process at teardown."""
         # Ensure that we don't leave any bitcoind processes lying around after
         # the test ends
         if self.process and self.cleanup_on_exit:
             # Should only happen on test failure
             # Avoid using logger, as that may have already been shutdown when
             # this destructor is called.
             print(self._node_msg("Cleaning up leftover process"))
             self.process.kill()
 
     def __getattr__(self, name):
         """Dispatches any unrecognised attribute access (e.g. RPC method
         names) to the RPC connection or a CLI instance."""
         if self.use_cli:
             return getattr(RPCOverloadWrapper(self.cli, True), name)
         else:
             assert self.rpc is not None, self._node_msg(
                 "Error: RPC not initialized")
             assert self.rpc_connected, self._node_msg(
                 "Error: No RPC connection")
             return getattr(RPCOverloadWrapper(self.rpc), name)
 
     def clear_default_args(self):
         """Drop all default command-line args for this node."""
         self.default_args.clear()

     def extend_default_args(self, args):
         """Append args (a list of strings) to the default command-line args."""
         self.default_args.extend(args)

     def remove_default_args(self, args):
         """Remove each arg in args from the default command-line args."""
         for rm_arg in args:
             # Remove all occurrences of rm_arg in self.default_args:
             #  - if the arg is a flag (-flag), then the names must match
             #  - if the arg is a value (-key=value) then the name must start
             #    with "-key=" (the '=' char is needed to avoid removing
             #    "-key_suffix" when "-key" is the argument to remove).
             self.default_args = [def_arg for def_arg in self.default_args
                                  if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')]
 
     def start(self, extra_args=None, *, cwd=None, stdout=None,
               stderr=None, **kwargs):
         """Start the node.

         extra_args defaults to the args given at construction time.
         stdout/stderr default to fresh temp files under the node's datadir.
         Does not wait for RPC; callers use wait_for_rpc_connection() for that."""
         if extra_args is None:
             extra_args = self.extra_args

         # Add a new stdout and stderr file each time bitcoind is started
         if stderr is None:
             stderr = tempfile.NamedTemporaryFile(
                 dir=self.stderr_dir, delete=False)
         if stdout is None:
             stdout = tempfile.NamedTemporaryFile(
                 dir=self.stdout_dir, delete=False)
         self.stderr = stderr
         self.stdout = stdout

         if cwd is None:
             cwd = self.cwd

         # Delete any existing cookie file -- if such a file exists (eg due to
         # unclean shutdown), it will get overwritten anyway by bitcoind, and
         # potentially interfere with our attempt to authenticate
         delete_cookie_file(self.datadir, self.chain)

         # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are
         # written to stderr and not the terminal
         subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")

         p_args = [self.binary] + self.default_args + extra_args
         if self.emulator is not None:
             p_args = [self.emulator] + p_args
         self.process = subprocess.Popen(
             p_args,
             env=subp_env,
             stdout=stdout,
             stderr=stderr,
             cwd=cwd,
             **kwargs)

         self.running = True
         self.log.debug("bitcoind started, waiting for RPC to come up")

         if self.start_perf:
             self._start_perf()
 
     def wait_for_rpc_connection(self):
         """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
         # Poll at a rate of four times per second
         poll_per_s = 4
         for _ in range(poll_per_s * self.rpc_timeout):
             if self.process.poll() is not None:
                 # Process has died; no point retrying.
                 raise FailedToStartError(self._node_msg(
                     'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
             try:
                 rpc = get_rpc_proxy(
                     rpc_url(
                         self.datadir,
                         self.chain,
                         self.host,
                         self.rpc_port),
                     self.index,
                     # Shorter timeout to allow for one retry in case of
                     # ETIMEDOUT
                     timeout=self.rpc_timeout // 2,
                     coveragedir=self.coverage_dir
                 )
                 rpc.getblockcount()
                 # If the call to getblockcount() succeeds then the RPC
                 # connection is up
                 wait_until(lambda: rpc.getmempoolinfo()['loaded'])
                 # Wait for the node to finish reindex, block import, and
                 # loading the mempool. Usually importing happens fast or
                 # even "immediate" when the node is started. However, there
                 # is no guarantee and sometimes ThreadImport might finish
                 # later. This is going to cause intermittent test failures,
                 # because generally the tests assume the node is fully
                 # ready after being started.
                 #
                 # For example, the node will reject block messages from p2p
                 # when it is still importing with the error "Unexpected
                 # block message received"
                 #
                 # The wait is done here to make tests as robust as possible
                 # and prevent racy tests and intermittent failures as much
                 # as possible. Some tests might not need this, but the
                 # overhead is trivial, and the added guarantees are worth
                 # the minimal performance cost.

                 self.log.debug("RPC successfully started")
                 if self.use_cli:
                     return
                 self.rpc = rpc
                 self.rpc_connected = True
                 self.url = self.rpc.url
                 return
             except JSONRPCException as e:  # Initialization phase
                 # -28 RPC in warmup
                 # -342 Service unavailable, RPC server started but is shutting down due to error
                 if e.error['code'] != -28 and e.error['code'] != -342:
                     raise  # unknown JSON RPC exception
             except ConnectionResetError:
                 # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
                 # succeeds. Try again to properly raise the FailedToStartError
                 pass
             except OSError as e:
                 if e.errno == errno.ETIMEDOUT:
                     # Treat identical to ConnectionResetError
                     pass
                 elif e.errno == errno.ECONNREFUSED:
                     # Port not yet open?
                     pass
                 else:
                     # unknown OS error
                     raise
             except ValueError as e:
                 # cookie file not found and no rpcuser or rpcpassword;
                 # bitcoind is still starting
                 if "No RPC credentials" not in str(e):
                     raise
             time.sleep(1.0 / poll_per_s)
         self._raise_assertion_error(
             "Unable to connect to bitcoind after {}s".format(
                 self.rpc_timeout))
 
     def wait_for_cookie_credentials(self):
         """Ensures auth cookie credentials can be read, e.g. for testing CLI
         with -rpcwait before RPC connection is up."""
         self.log.debug("Waiting for cookie credentials")
         # Poll at a rate of four times per second.
         poll_per_s = 4
         for _ in range(poll_per_s * self.rpc_timeout):
             try:
                 get_auth_cookie(self.datadir, self.chain)
                 self.log.debug("Cookie credentials successfully retrieved")
                 return
             except ValueError:
                 # cookie file not found and no rpcuser or rpcpassword;
                 # bitcoind is still starting so we continue polling until
                 # RPC credentials are retrieved
                 pass
             time.sleep(1.0 / poll_per_s)
         self._raise_assertion_error(
             "Unable to retrieve cookie credentials after {}s".format(
                 self.rpc_timeout))
 
     def generate(self, nblocks, maxtries=1000000):
         self.log.debug(
             "TestNode.generate() dispatches `generate` call to `generatetoaddress`")
         return self.generatetoaddress(
             nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
 
     def get_wallet_rpc(self, wallet_name):
         if self.use_cli:
             return RPCOverloadWrapper(
                 self.cli("-rpcwallet={}".format(wallet_name)), True)
         else:
             assert self.rpc is not None, self._node_msg(
                 "Error: RPC not initialized")
             assert self.rpc_connected, self._node_msg(
                 "Error: RPC not connected")
             wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
             return RPCOverloadWrapper(self.rpc / wallet_path)
 
     def stop_node(self, expected_stderr='', *, wait=0,
                   wait_until_stopped=True):
         """Stop the node."""
         if not self.running:
             return
         self.log.debug("Stopping node")
         try:
             self.stop(wait=wait)
         except http.client.CannotSendRequest:
             self.log.exception("Unable to stop node.")
 
         # If there are any running perf processes, stop them.
         for profile_name in tuple(self.perf_subprocesses.keys()):
             self._stop_perf(profile_name)
 
         # Check that stderr is as expected
         self.stderr.seek(0)
         stderr = self.stderr.read().decode('utf-8').strip()
         if stderr != expected_stderr:
             raise AssertionError(
                 "Unexpected stderr {} != {}".format(stderr, expected_stderr))
 
         self.stdout.close()
         self.stderr.close()
 
         del self.p2ps[:]
 
         if wait_until_stopped:
             self.wait_until_stopped()
 
     def is_node_stopped(self):
         """Checks whether the node has stopped.
 
         Returns True if the node has stopped. False otherwise.
         This method is responsible for freeing resources (self.process)."""
         if not self.running:
             return True
         return_code = self.process.poll()
         if return_code is None:
             return False
 
         # process has stopped. Assert that it didn't return an error code.
         assert return_code == 0, self._node_msg(
             "Node returned non-zero exit code ({}) when stopping".format(return_code))
         self.running = False
         self.process = None
         self.rpc_connected = False
         self.rpc = None
         self.log.debug("Node stopped")
         return True
 
     def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
         wait_until(
             self.is_node_stopped,
             timeout=timeout,
             timeout_factor=self.timeout_factor)
 
    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
        """Assert that some debug messages are present within some timeout.
        Unexpected debug messages may be optionally provided to fail a test
        if they appear before expected messages.

        Note: expected_msgs must always be non-empty even if the goal is to check
        for unexpected_msgs. This provides a bounded scenario such that "we expect
        to reach some target resulting in expected_msgs without seeing unexpected_msgs.
        Otherwise, we are testing that something never happens, which is fundamentally
        not robust test logic.
        """
        if not expected_msgs:
            raise AssertionError("Expected debug messages is empty")
        if unexpected_msgs is None:
            unexpected_msgs = []
        time_end = time.time() + timeout * self.timeout_factor
        debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
        with open(debug_log, encoding='utf-8') as dl:
            # Record the current end of debug.log so that only output
            # produced while the managed block runs is scanned below.
            dl.seek(0, 2)
            prev_size = dl.tell()

        yield

        # Poll the log until every expected message appears, an unexpected
        # message appears, or the timeout expires.
        while True:
            found = True
            with open(debug_log, encoding='utf-8') as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for unexpected_msg in unexpected_msgs:
                # Escaped-regex search, i.e. a plain substring match per line.
                if re.search(re.escape(unexpected_msg),
                             log, flags=re.MULTILINE):
                    self._raise_assertion_error(
                        'Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(
                            unexpected_msg, print_log))
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log,
                             flags=re.MULTILINE) is None:
                    found = False
            if found:
                return
            if time.time() >= time_end:
                break
            time.sleep(0.05)
        self._raise_assertion_error(
            'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
                str(expected_msgs), print_log))
 
     @contextlib.contextmanager
     def profile_with_perf(self, profile_name):
         """
         Context manager that allows easy profiling of node activity using `perf`.
 
         See `test/functional/README.md` for details on perf usage.
 
         Args:
             profile_name (str): This string will be appended to the
                 profile data filename generated by perf.
         """
         subp = self._start_perf(profile_name)
 
         yield
 
         if subp:
             self._stop_perf(profile_name)
 
     def _start_perf(self, profile_name=None):
         """Start a perf process to profile this node.
 
         Returns the subprocess running perf."""
         subp = None
 
         def test_success(cmd):
             return subprocess.call(
                 # shell=True required for pipe use below
                 cmd, shell=True,
                 stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
 
         if not sys.platform.startswith('linux'):
             self.log.warning(
                 "Can't profile with perf; only availabe on Linux platforms")
             return None
 
         if not test_success('which perf'):
             self.log.warning(
                 "Can't profile with perf; must install perf-tools")
             return None
 
         if not test_success(
                 'readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
             self.log.warning(
                 "perf output won't be very useful without debug symbols compiled into bitcoind")
 
         output_path = tempfile.NamedTemporaryFile(
             dir=self.datadir,
             prefix="{}.perf.data.".format(profile_name or 'test'),
             delete=False,
         ).name
 
         cmd = [
             'perf', 'record',
             '-g',                     # Record the callgraph.
             # Compatibility for gcc's --fomit-frame-pointer.
             '--call-graph', 'dwarf',
             '-F', '101',              # Sampling frequency in Hz.
             '-p', str(self.process.pid),
             '-o', output_path,
         ]
         subp = subprocess.Popen(
             cmd,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE)
         self.perf_subprocesses[profile_name] = subp
 
         return subp
 
     def _stop_perf(self, profile_name):
         """Stop (and pop) a perf subprocess."""
         subp = self.perf_subprocesses.pop(profile_name)
         output_path = subp.args[subp.args.index('-o') + 1]
 
         subp.terminate()
         subp.wait(timeout=10)
 
         stderr = subp.stderr.read().decode()
         if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
             self.log.warning(
                 "perf couldn't collect data! Try "
                 "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
         else:
             report_cmd = "perf report -i {}".format(output_path)
             self.log.info("See perf output by running '{}'".format(report_cmd))
 
    def assert_start_raises_init_error(
            self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails
        match: how expected_msg is compared against stderr (see ErrorMatch)

        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
                tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout,
                           stderr=log_stderr, *args, **kwargs)
                self.wait_for_rpc_connection()
                self.stop_node()
                self.wait_until_stopped()
            except FailedToStartError as e:
                # This is the expected path: startup failed.
                self.log.debug('bitcoind failed to start: {}'.format(e))
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr,
                                     flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        # Exact string comparison, no regex semantics.
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            else:
                # Startup unexpectedly succeeded: fail the test.
                if expected_msg is None:
                    assert_msg = "bitcoind should have exited with an error"
                else:
                    assert_msg = "bitcoind should have exited with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
 
     def relay_fee(self, cached=True):
         if not self.relay_fee_cache or not cached:
             self.relay_fee_cache = self.getnetworkinfo()["relayfee"]
 
         return self.relay_fee_cache
 
     def calculate_fee(self, tx):
         """ Estimate the necessary fees (in sats) for an unsigned CTransaction assuming:
         - the current relayfee on node
         - all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr
         - all inputs currently unsigned (empty scriptSig)
         """
         billable_size_estimate = tx.billable_size()
         # Add some padding for signatures / public keys
         # 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes)
         billable_size_estimate += len(tx.vin) * 107
 
         # relay_fee gives a value in XEC per kB.
         return int(self.relay_fee() / 1000 * billable_size_estimate * XEC)
 
     def calculate_fee_from_txid(self, txid):
         ctx = FromHex(CTransaction(), self.getrawtransaction(txid))
         return self.calculate_fee(ctx)
 
     def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
         """Add a p2p connection to the node.
 
         This method adds the p2p connection to the self.p2ps list and also
         returns the connection to the caller."""
         if 'dstport' not in kwargs:
             kwargs['dstport'] = p2p_port(self.index)
         if 'dstaddr' not in kwargs:
             kwargs['dstaddr'] = '127.0.0.1'
 
         p2p_conn.peer_connect(
             **kwargs,
             net=self.chain,
             timeout_factor=self.timeout_factor)()
         self.p2ps.append(p2p_conn)
+        p2p_conn.wait_until(
+            lambda: p2p_conn.is_connected,
+            check_connected=False)
         if wait_for_verack:
             # Wait for the node to send us the version and verack
             p2p_conn.wait_for_verack()
             # At this point we have sent our version message and received the version and verack, however the full node
             # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
             # established (fSuccessfullyConnected).
             #
             # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
             # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
             # transaction that will be added to the mempool as soon as we return here.
             #
             # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
             # in comparison to the upside of making tests less fragile and
             # unexpected intermittent errors less likely.
             p2p_conn.sync_with_ping()
 
         return p2p_conn
 
     @property
     def p2p(self):
         """Return the first p2p connection
 
         Convenience property - most tests only use a single p2p connection to each
         node, so this saves having to write node.p2ps[0] many times."""
         assert self.p2ps, self._node_msg("No p2p connection")
         return self.p2ps[0]
 
     def num_test_p2p_connections(self):
         """Return number of test framework p2p connections to the node."""
         return len([peer for peer in self.getpeerinfo()
                     if peer['subver'] == MY_SUBVERSION])
 
     def disconnect_p2ps(self):
         """Close all p2p connections to the node."""
         for p in self.p2ps:
             p.peer_disconnect()
         del self.p2ps[:]
         wait_until(lambda: self.num_test_p2p_connections() == 0)
 
 
 class TestNodeCLIAttr:
     def __init__(self, cli, command):
         self.cli = cli
         self.command = command
 
     def __call__(self, *args, **kwargs):
         return self.cli.send_cli(self.command, *args, **kwargs)
 
     def get_request(self, *args, **kwargs):
         return lambda: self(*args, **kwargs)
 
 
 def arg_to_cli(arg):
     if isinstance(arg, bool):
         return str(arg).lower()
     elif isinstance(arg, dict) or isinstance(arg, list):
         return json.dumps(arg, default=EncodeDecimal)
     else:
         return str(arg)
 
 
 class TestNodeCLI():
     """Interface to bitcoin-cli for an individual node"""
 
     def __init__(self, binary, datadir, emulator=None):
         self.options = []
         self.binary = binary
         self.datadir = datadir
         self.input = None
         self.log = logging.getLogger('TestFramework.bitcoincli')
         self.emulator = emulator
 
     def __call__(self, *options, input=None):
         # TestNodeCLI is callable with bitcoin-cli command-line options
         cli = TestNodeCLI(self.binary, self.datadir, self.emulator)
         cli.options = [str(o) for o in options]
         cli.input = input
         return cli
 
     def __getattr__(self, command):
         return TestNodeCLIAttr(self, command)
 
     def batch(self, requests):
         results = []
         for request in requests:
             try:
                 results.append(dict(result=request()))
             except JSONRPCException as e:
                 results.append(dict(error=e))
         return results
 
     def send_cli(self, command=None, *args, **kwargs):
         """Run bitcoin-cli command. Deserializes returned string as python object."""
         pos_args = [arg_to_cli(arg) for arg in args]
         named_args = [str(key) + "=" + arg_to_cli(value)
                       for (key, value) in kwargs.items()]
         assert not (
             pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
         p_args = [self.binary, "-datadir=" + self.datadir] + self.options
         if named_args:
             p_args += ["-named"]
         if command is not None:
             p_args += [command]
         p_args += pos_args + named_args
         self.log.debug("Running bitcoin-cli {}".format(p_args[2:]))
         if self.emulator is not None:
             p_args = [self.emulator] + p_args
         process = subprocess.Popen(p_args, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
         cli_stdout, cli_stderr = process.communicate(input=self.input)
         returncode = process.poll()
         if returncode:
             match = re.match(
                 r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
             if match:
                 code, message = match.groups()
                 raise JSONRPCException(dict(code=int(code), message=message))
             # Ignore cli_stdout, raise with cli_stderr
             raise subprocess.CalledProcessError(
                 returncode, self.binary, output=cli_stderr)
         try:
             return json.loads(cli_stdout, parse_float=decimal.Decimal)
         except (json.JSONDecodeError, decimal.InvalidOperation):
             return cli_stdout.rstrip("\n")
 
 
 class RPCOverloadWrapper():
    def __init__(self, rpc, cli=False):
        # Wrapped RPC interface all non-overloaded calls are forwarded to.
        self.rpc = rpc
        # True when calls go through bitcoin-cli; affects how omitted
        # arguments are represented ('null' placeholders) in the overloads.
        self.is_cli = cli
 
    def __getattr__(self, name):
        # Delegate any attribute not overloaded on this wrapper to the
        # wrapped RPC object.
        return getattr(self.rpc, name)
 
     def importprivkey(self, privkey, label=None, rescan=None):
         wallet_info = self.getwalletinfo()
         if self.is_cli:
             if label is None:
                 label = 'null'
             if rescan is None:
                 rescan = 'null'
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('importprivkey')(privkey, label, rescan)
         desc = descsum_create('combo(' + privkey + ')')
         req = [{
             'desc': desc,
             'timestamp': 0 if rescan else 'now',
             'label': label if label else ''
         }]
         import_res = self.importdescriptors(req)
         if not import_res[0]['success']:
             raise JSONRPCException(import_res[0]['error'])
 
     def addmultisigaddress(self, nrequired, keys,
                            label=None):
         wallet_info = self.getwalletinfo()
         if self.is_cli:
             if label is None:
                 label = 'null'
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('addmultisigaddress')(
                 nrequired, keys, label)
         cms = self.createmultisig(nrequired, keys)
         req = [{
             'desc': cms['descriptor'],
             'timestamp': 0,
             'label': label if label else ''
         }]
         import_res = self.importdescriptors(req)
         if not import_res[0]['success']:
             raise JSONRPCException(import_res[0]['error'])
         return cms
 
     def importpubkey(self, pubkey, label=None, rescan=None):
         wallet_info = self.getwalletinfo()
         if self.is_cli:
             if label is None:
                 label = 'null'
             if rescan is None:
                 rescan = 'null'
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('importpubkey')(pubkey, label, rescan)
         desc = descsum_create('combo(' + pubkey + ')')
         req = [{
             'desc': desc,
             'timestamp': 0 if rescan else 'now',
             'label': label if label else ''
         }]
         import_res = self.importdescriptors(req)
         if not import_res[0]['success']:
             raise JSONRPCException(import_res[0]['error'])
 
     def importaddress(self, address, label=None, rescan=None, p2sh=None):
         wallet_info = self.getwalletinfo()
         if self.is_cli:
             if label is None:
                 label = 'null'
             if rescan is None:
                 rescan = 'null'
             if p2sh is None:
                 p2sh = 'null'
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('importaddress')(
                 address, label, rescan, p2sh)
         is_hex = False
         try:
             int(address, 16)
             is_hex = True
             desc = descsum_create('raw(' + address + ')')
         except BaseException:
             desc = descsum_create('addr(' + address + ')')
         reqs = [{
             'desc': desc,
             'timestamp': 0 if rescan else 'now',
             'label': label if label else ''
         }]
         if is_hex and p2sh:
             reqs.append({
                 'desc': descsum_create('p2sh(raw(' + address + '))'),
                 'timestamp': 0 if rescan else 'now',
                 'label': label if label else ''
             })
         import_res = self.importdescriptors(reqs)
         for res in import_res:
             if not res['success']:
                 raise JSONRPCException(res['error'])