diff --git a/cmake/utils/gen-ninja-deps.py b/cmake/utils/gen-ninja-deps.py
index 57674206e..ab2dd4e23 100755
--- a/cmake/utils/gen-ninja-deps.py
+++ b/cmake/utils/gen-ninja-deps.py
@@ -1,181 +1,181 @@
 #!/usr/bin/env python3
 
-import subprocess
-import os
 import argparse
+import os
+import subprocess
 
 parser = argparse.ArgumentParser(description='Produce a dep file from ninja.')
 parser.add_argument(
     '--build-dir',
     help='The build directory.',
     required=True)
 parser.add_argument(
     '--base-dir',
     help='The directory for which dependencies are rewritten.',
     required=True)
 parser.add_argument('--ninja', help='The ninja executable to use.')
 parser.add_argument(
     'base_target',
     help="The target from the base's perspective.")
 parser.add_argument(
     'targets', nargs='+',
     help='The targets for which dependencies are extracted.')
 parser.add_argument(
     '--extra-deps', nargs='+',
     help='Extra dependencies.')
 
 args = parser.parse_args()
 build_dir = os.path.abspath(args.build_dir)
 base_dir = os.path.abspath(args.base_dir)
 ninja = args.ninja
 base_target = args.base_target
 targets = args.targets
 extra_deps = args.extra_deps
 
 # Make sure we operate in the right folder.
 os.chdir(build_dir)
 
 if ninja is None:
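     # check_output returns the path with a trailing newline; [:-1] strips it.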
     ninja = subprocess.check_output(['command', '-v', 'ninja'])[:-1]
 
 # Construct the set of all targets
 all_targets = set()
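 # Object targets built by the C/C++ compiler rules; only these have header
 # dependencies recorded in ninja's deps log (queried later via 'ninja -t deps').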
 doto_targets = set()
 for t in subprocess.check_output([ninja, '-t', 'targets', 'all']).splitlines():
     t, r = t.split(b':')
     all_targets.add(t)
     if r[:13] == b' C_COMPILER__' or r[:15] == b' CXX_COMPILER__':
         doto_targets.add(t)
 
 
 def parse_ninja_query(query):
     deps = dict()
     lines = query.splitlines()
 
     while len(lines):
         line = lines.pop(0)
         if line[0] == ord(' '):
             continue
 
         # We have a new target
         target = line.split(b':')[0]
         assert lines.pop(0)[:8] == b'  input:'
 
         inputs = set()
         while True:
             i = lines.pop(0)
             if i[:4] != b'    ':
                 break
 
             '''
             ninja has three types of input:
               1. Explicit dependencies, no prefix.
               2. Implicit dependencies, '|' prefix.
               3. Order-only dependencies, '||' prefix.
 
             Order-only dependencies do not require the target to be
             rebuilt, so we ignore them.
             '''
             i = i[4:]
             if i[0] == ord('|'):
                 if i[1] == ord('|'):
                     # We reached the order only dependencies.
                     break
                 i = i[2:]
 
             inputs.add(i)
 
         deps[target] = inputs
 
     return deps
 
 
 def extract_deps(workset):
     # Recursively extract the dependencies of the targets in the workset.
     deps = dict()
     while len(workset) > 0:
         query = subprocess.check_output([ninja, '-t', 'query'] + list(workset))
         target_deps = parse_ninja_query(query)
         deps.update(target_deps)
 
         workset = set()
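         # Queue inputs that are themselves ninja targets and have not been
         # resolved yet; the loop ends once no new targets are discovered.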
         for d in target_deps.values():
             workset.update(t for t in d if t in all_targets and t not in deps)
 
     # Extract build time dependencies.
     bt_targets = [t for t in deps if t in doto_targets]
     if len(bt_targets) == 0:
         return deps
 
     ndeps = subprocess.check_output(
         [ninja, '-t', 'deps'] + bt_targets,
         stderr=subprocess.DEVNULL)
 
     lines = ndeps.splitlines()
     while len(lines) > 0:
         line = lines.pop(0)
         t, m = line.split(b':')
         if m == b' deps not found':
             continue
 
         inputs = set()
         while True:
             i = lines.pop(0)
             if i == b'':
                 break
 
             assert i[:4] == b'    '
             inputs.add(i[4:])
 
         deps[t] = inputs
 
     return deps
 
 
 base_dir = base_dir.encode()
 
 
 def rebase_deps(deps):
     rebased = dict()
     cache = dict()
 
     def rebase(path):
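         # Paths that are already absolute are kept as-is; build-dir-relative
         # paths are rewritten to be relative to base_dir instead.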
         if path in cache:
             return cache[path]
 
         abspath = os.path.abspath(path)
         newpath = path if path == abspath else os.path.relpath(
             abspath, base_dir)
         cache[path] = newpath
         return newpath
 
     for t, s in deps.items():
         rebased[rebase(t)] = set(rebase(d) for d in s)
 
     return rebased
 
 
 deps = extract_deps(set(targets))
 deps = rebase_deps(deps)
 
 
 def dump(deps):
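     # Emit one Make-style dependency rule per target, e.g.:
     #   target: \
     #     dep1 \
     #     dep2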
     for t, d in deps.items():
         if len(d) == 0:
             continue
 
         str = t.decode() + ": \\\n  "
         str += " \\\n  ".join(sorted(map((lambda x: x.decode()), d)))
 
         print(str)
 
 
 # Collapse everything under the base target.
 basedeps = set() if extra_deps is None else set(d.encode() for d in extra_deps)
 for d in deps.values():
     basedeps.update(d)
 
 base_target = base_target.encode()
 basedeps.discard(base_target)
 
 dump({base_target: basedeps})
diff --git a/share/qt/extract_strings_qt.py b/share/qt/extract_strings_qt.py
index dd6bad057..a9cdd538b 100755
--- a/share/qt/extract_strings_qt.py
+++ b/share/qt/extract_strings_qt.py
@@ -1,89 +1,89 @@
 #!/usr/bin/env python3
 # Copyright (c) 2012-2017 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 '''
 Extract _("...") strings for translation and convert to Qt stringdefs so that
 they can be picked up by Qt linguist.
 '''
-from subprocess import Popen, PIPE
 import operator
 import os
 import sys
+from subprocess import PIPE, Popen
 
 OUT_CPP = "qt/bitcoinstrings.cpp"
 EMPTY = ['""']
 
 
 def parse_po(text):
     """
     Parse 'po' format produced by xgettext.
     Return a list of (msgid,msgstr) tuples.
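 
     A minimal fragment of the expected input (illustrative):
 
         msgid "some text"
         msgstr ""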
     """
     messages = []
     msgid = []
     msgstr = []
     in_msgid = False
     in_msgstr = False
 
     for line in text.split('\n'):
         line = line.rstrip('\r')
         if line.startswith('msgid '):
             if in_msgstr:
                 messages.append((msgid, msgstr))
                 in_msgstr = False
             # message start
             in_msgid = True
 
             msgid = [line[6:]]
         elif line.startswith('msgstr '):
             in_msgid = False
             in_msgstr = True
             msgstr = [line[7:]]
         elif line.startswith('"'):
             if in_msgid:
                 msgid.append(line)
             if in_msgstr:
                 msgstr.append(line)
 
     if in_msgstr:
         messages.append((msgid, msgstr))
 
     return messages
 
 
 files = sys.argv[1:]
 
 # xgettext -n --keyword=_ $FILES
 XGETTEXT = os.getenv('XGETTEXT', 'xgettext')
 if not XGETTEXT:
     print(
         'Cannot extract strings: xgettext utility is not installed or not configured.',
         file=sys.stderr)
     print('Please install package "gettext" and re-run \'./configure\'.',
           file=sys.stderr)
     sys.exit(1)
 child = Popen([XGETTEXT, '--output=-', '-n',
                '--keyword=_'] + files, stdout=PIPE)
 (out, err) = child.communicate()
 
 messages = parse_po(out.decode('utf-8'))
 
 f = open(OUT_CPP, 'w', encoding="utf8")
 f.write('#include <QtGlobal>\n')
 f.write('// Automatically @{} by extract_strings_qt.py\n'.format('generated'))
 f.write("""
 #ifdef __GNUC__
 #define UNUSED __attribute__((unused))
 #else
 #define UNUSED
 #endif
 """)
 f.write('static const char UNUSED *bitcoin_strings[] = {\n')
 f.write('QT_TRANSLATE_NOOP("bitcoin-abc", "{}"),\n'.format(os.getenv('COPYRIGHT_HOLDERS'),))
 messages.sort(key=operator.itemgetter(0))
 for (msgid, msgstr) in messages:
     if msgid != EMPTY:
         f.write('QT_TRANSLATE_NOOP("bitcoin-abc", {}),\n'.format('\n'.join(msgid)))
 f.write('};\n')
 f.close()
diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py
index bf5a2fdcb..ff5dad845 100755
--- a/share/rpcauth/rpcauth.py
+++ b/share/rpcauth/rpcauth.py
@@ -1,54 +1,53 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2018 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
+import hmac
 from argparse import ArgumentParser
 from base64 import urlsafe_b64encode
 from binascii import hexlify
 from getpass import getpass
 from os import urandom
 
-import hmac
-
 
 def generate_salt(size):
     """Create size byte hex salt"""
     return hexlify(urandom(size)).decode()
 
 
 def generate_password():
     """Create 32 byte b64 password"""
     return urlsafe_b64encode(urandom(32)).decode('utf-8')
 
 
 def password_to_hmac(salt, password):
     m = hmac.new(bytearray(salt, 'utf-8'),
                  bytearray(password, 'utf-8'), 'SHA256')
     return m.hexdigest()
 
 
 def main():
     parser = ArgumentParser(
         description='Create login credentials for a JSON-RPC user')
     parser.add_argument('username', help='the username for authentication')
     parser.add_argument(
         'password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
     args = parser.parse_args()
 
     if not args.password:
         args.password = generate_password()
     elif args.password == '-':
         args.password = getpass()
 
     # Create 16 byte hex salt
     salt = generate_salt(16)
     password_hmac = password_to_hmac(salt, args.password)
 
     print('String to be appended to bitcoin.conf:')
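     # Format: rpcauth=<username>:<salt>$<hmac-sha256(salt, password)>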
     print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
     print('Your password:\n{0}'.format(args.password))
 
 
 if __name__ == '__main__':
     main()
diff --git a/src/test/data/generate_asmap.py b/src/test/data/generate_asmap.py
index a412930ce..ef510bb9e 100755
--- a/src/test/data/generate_asmap.py
+++ b/src/test/data/generate_asmap.py
@@ -1,26 +1,26 @@
 #!/usr/bin/env python3
 # Copyright (c) 2020 The Bitcoin developers
 
-from pathlib import Path
 import sys
+from pathlib import Path
 
 
 def main(input_file, output_file):
     with open(input_file, 'rb') as f:
         contents = f.read()
 
     with open(output_file, "w", encoding="utf-8") as f:
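         # Emit the input bytes as a C array definition, for example
         # (illustrative): static unsigned const char asmap_raw[] = { 0x00, 0x01 };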
         f.write(
             "static unsigned const char {}_raw[] = {{\n".format(
                 Path(input_file).stem))
         f.write(", ".join(map(lambda x: "0x{:02x}".format(x), contents)))
         f.write("\n};\n")
 
 
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Invalid parameters\nUsage: {} input_file output_file".format(
             Path(sys.argv[0]).name))
         sys.exit(1)
 
     main(sys.argv[1], sys.argv[2])
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
index a5b832bbc..af9ee83ca 100755
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -1,223 +1,223 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017-2020 The Bitcoin developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Combine logs from multiple bitcoin nodes as well as the test_framework log.
 
 This streams the combined log output to stdout. Use combine_logs.py > outputfile
 to write to an outputfile.
 
 If no argument is provided, the most recent test directory will be used."""
 
 import argparse
-from collections import defaultdict, namedtuple
 import heapq
 import itertools
 import os
 import pathlib
 import re
 import sys
 import tempfile
+from collections import defaultdict, namedtuple
 
 # N.B.: don't import any local modules here - this script must remain executable
 # without the parent module installed.
 
 # Should match same symbol in `test_framework.test_framework`.
 TMPDIR_PREFIX = "bitcoin_func_test_"
 
 # Matches on the date format at the start of the log event
 TIMESTAMP_PATTERN = re.compile(
     r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
 
 LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
 
 
 def main():
     """Main function. Parses args, reads the log files and renders them as text or html."""
     parser = argparse.ArgumentParser(
         description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
     parser.add_argument(
         'testdir', nargs='?', default='',
         help='temporary test directory to combine logs from. '
              'Defaults to the most recent')
     parser.add_argument('-c', '--color', dest='color', action='store_true',
                         help='outputs the combined log with events colored by '
                              'source (requires posix terminal colors. Use less'
                              ' -r for viewing)')
     parser.add_argument('--html', dest='html', action='store_true',
                         help='outputs the combined log as html. '
                              'Requires jinja2. pip install jinja2')
     args = parser.parse_args()
 
     if args.html and args.color:
         print("Only one out of --color or --html should be specified")
         sys.exit(1)
 
     testdir = args.testdir or find_latest_test_dir()
 
     if not testdir:
         print("No test directories found")
         sys.exit(1)
 
     if not args.testdir:
         print("Opening latest test directory: {}".format(testdir),
               file=sys.stderr)
 
     colors = defaultdict(lambda: '')
     if args.color:
         colors["test"] = "\033[0;36m"  # CYAN
         colors["node0"] = "\033[0;34m"  # BLUE
         colors["node1"] = "\033[0;32m"  # GREEN
         colors["node2"] = "\033[0;31m"  # RED
         colors["node3"] = "\033[0;33m"  # YELLOW
         colors["reset"] = "\033[0m"  # Reset font color
 
     log_events = read_logs(testdir)
 
     if args.html:
         print_logs_html(log_events)
     else:
         print_logs_plain(log_events, colors)
         print_node_warnings(testdir, colors)
 
 
 def read_logs(tmp_dir):
     """Reads log files.
 
     Delegates to generator function get_log_events() to provide individual log events
     for each of the input log files."""
 
     # Find out what the folder is called that holds the debug.log file
     glob = pathlib.Path(tmp_dir).glob('node0/**/debug.log')
     path = next(glob, None)
     if path:
         # more than one debug.log should never happen
         assert next(glob, None) is None
         # extract the chain name
         chain = re.search(r'node0/(.+?)/debug\.log$',
                           path.as_posix()).group(1)
     else:
         # fallback to regtest (should only happen when none exists)
         chain = 'regtest'
 
     files = [("test", "{}/test_framework.log".format(tmp_dir))]
     for i in itertools.count():
         logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain)
         if not os.path.isfile(logfile):
             break
         files.append(("node{}".format(i), logfile))
 
     return heapq.merge(*[get_log_events(source, f) for source, f in files])
 
 
 def print_node_warnings(tmp_dir, colors):
     """Print nodes' errors and warnings"""
 
     warnings = []
     for stream in ['stdout', 'stderr']:
         for i in itertools.count():
             folder = "{}/node{}/{}".format(tmp_dir, i, stream)
             if not os.path.isdir(folder):
                 break
             for (_, _, fns) in os.walk(folder):
                 for fn in fns:
                     warning = pathlib.Path(
                         '{}/{}'.format(folder, fn)).read_text().strip()
                     if warning:
                         warnings.append(("node{} {}".format(i, stream),
                                          warning))
 
     print()
     for w in warnings:
         print("{} {} {} {}".format(colors[w[0].split()[0]],
                                    w[0], w[1], colors["reset"]))
 
 
 def find_latest_test_dir():
     """Returns the latest tmpfile test directory prefix."""
     tmpdir = tempfile.gettempdir()
 
     def join_tmp(basename):
         return os.path.join(tmpdir, basename)
 
     def is_valid_test_tmpdir(basename):
         fullpath = join_tmp(basename)
         return (
             os.path.isdir(fullpath)
             and basename.startswith(TMPDIR_PREFIX)
             and os.access(fullpath, os.R_OK)
         )
 
     testdir_paths = [join_tmp(name) for name in os.listdir(tmpdir)
                      if is_valid_test_tmpdir(name)]
 
     return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None
 
 
 def get_log_events(source, logfile):
     """Generator function that returns individual log events.
 
     Log events may be split over multiple lines. We use the timestamp
     regex match as the marker for a new log event."""
     try:
         with open(logfile, 'r', encoding='utf-8') as infile:
             event = ''
             timestamp = ''
             for line in infile:
                 # skip blank lines
                 if line == '\n':
                     continue
                 # if this line has a timestamp, it's the start of a new log
                 # event.
                 time_match = TIMESTAMP_PATTERN.match(line)
                 if time_match:
                     if event:
                         yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                     timestamp = time_match.group()
                     if time_match.group(1) is None:
                         # timestamp does not have microseconds. Add zeroes.
                         timestamp_micro = timestamp.replace("Z", ".000000Z")
                         line = line.replace(timestamp, timestamp_micro)
                         timestamp = timestamp_micro
                     event = line
                 # if it doesn't have a timestamp, it's a continuation line of
                 # the previous log.
                 else:
                     # Add the line. Prefix with space equivalent to the source
                     # + timestamp so log lines are aligned
                     event += "                                   " + line
             # Flush the final event
             yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
     except FileNotFoundError:
         print("File {} could not be opened. Continuing without it.".format(
             logfile), file=sys.stderr)
 
 
 def print_logs_plain(log_events, colors):
     """Renders the iterator of log events into text."""
     for event in log_events:
         lines = event.event.splitlines()
         print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()],
                                            event.source, lines[0],
                                            colors["reset"]))
         if len(lines) > 1:
             for line in lines[1:]:
                 print("{0}{1}{2}".format(
                     colors[event.source.rstrip()], line, colors["reset"]))
 
 
 def print_logs_html(log_events):
     """Renders the iterator of log events into html."""
     try:
         import jinja2
     except ImportError:
         print("jinja2 not found. Try `pip install jinja2`")
         sys.exit(1)
     print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
                 .get_template('combined_log_template.html')
                 .render(title="Combined Logs from testcase",
                         log_events=[event._asdict() for event in log_events]))
 
 
 if __name__ == '__main__':
     main()
diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py
index 38da70d2a..ac439303f 100644
--- a/test/functional/data/invalid_txs.py
+++ b/test/functional/data/invalid_txs.py
@@ -1,257 +1,255 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2018 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """
 Templates for constructing various sorts of invalid transactions.
 
 These templates (or an iterator over all of them) can be reused in different
 contexts to test using a number of invalid transaction types.
 
 Hopefully this makes it easier to get coverage of a full variety of tx
 validation checks through different interfaces (AcceptBlock, AcceptToMemPool,
 etc.) without repeating ourselves.
 
 Invalid tx cases not covered here can be found by running:
 
     $ diff \
       <(grep -IREho "bad-txns[a-zA-Z-]+" src | sort -u) \
       <(grep -IEho "bad-txns[a-zA-Z-]+" test/functional/data/invalid_txs.py | sort -u)
 
 """
 import abc
-
 from typing import Optional
+
+from test_framework import script as sc
+from test_framework.blocktools import create_tx_with_script
 from test_framework.messages import (
+    MAX_MONEY,
     COutPoint,
     CTransaction,
     CTxIn,
     CTxOut,
-    MAX_MONEY,
 )
-from test_framework import script as sc
-from test_framework.blocktools import create_tx_with_script
-from test_framework.txtools import pad_tx
-
-
 from test_framework.script import (
-    CScript,
-    OP_INVERT,
-    OP_2MUL,
     OP_2DIV,
-    OP_MUL,
+    OP_2MUL,
+    OP_INVERT,
     OP_LSHIFT,
-    OP_RSHIFT
+    OP_MUL,
+    OP_RSHIFT,
+    CScript,
 )
+from test_framework.txtools import pad_tx
 
 basic_p2sh = sc.CScript(
     [sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])
 
 
 class BadTxTemplate:
     """Allows simple construction of a certain kind of invalid tx. Base class to be subclassed."""
     __metaclass__ = abc.ABCMeta
 
     # The expected error code given by bitcoind upon submission of the tx.
     reject_reason: Optional[str] = ""
 
     # Only specified if it differs from mempool acceptance error.
     block_reject_reason = ""
 
     # Do we expect to be disconnected after submitting this tx?
     expect_disconnect = False
 
     # Is this tx considered valid when included in a block, but not for acceptance into
     # the mempool (i.e. does it violate policy but not consensus)?
     valid_in_block = False
 
     def __init__(self, *, spend_tx=None, spend_block=None):
         self.spend_tx = spend_block.vtx[0] if spend_block else spend_tx
         self.spend_avail = sum(o.nValue for o in self.spend_tx.vout)
         self.valid_txin = CTxIn(
             COutPoint(
                 self.spend_tx.sha256,
                 0),
             b"",
             0xffffffff)
 
     @abc.abstractmethod
     def get_tx(self, *args, **kwargs):
         """Return a CTransaction that is invalid per the subclass."""
         pass
 
 
 class OutputMissing(BadTxTemplate):
     reject_reason = "bad-txns-vout-empty"
     expect_disconnect = True
 
     def get_tx(self):
         tx = CTransaction()
         tx.vin.append(self.valid_txin)
         tx.calc_sha256()
         return tx
 
 
 class InputMissing(BadTxTemplate):
     reject_reason = "bad-txns-vin-empty"
     expect_disconnect = True
 
     def get_tx(self):
         tx = CTransaction()
         tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE] * 100)))
         tx.calc_sha256()
         return tx
 
 
 class SizeTooSmall(BadTxTemplate):
     reject_reason = "bad-txns-undersize"
     expect_disconnect = False
     valid_in_block = True
 
     def get_tx(self):
         tx = CTransaction()
         tx.vin.append(self.valid_txin)
         tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE])))
         tx.calc_sha256()
         return tx
 
 
 class BadInputOutpointIndex(BadTxTemplate):
     # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins
     # database can't distinguish between spent outpoints and outpoints which
     # never existed.
     reject_reason = None
     expect_disconnect = False
 
     def get_tx(self):
         num_indices = len(self.spend_tx.vin)
         bad_idx = num_indices + 100
 
         tx = CTransaction()
         tx.vin.append(
             CTxIn(
                 COutPoint(
                     self.spend_tx.sha256,
                     bad_idx),
                 b"",
                 0xffffffff))
         tx.vout.append(CTxOut(0, basic_p2sh))
         tx.calc_sha256()
         return tx
 
 
 class DuplicateInput(BadTxTemplate):
     reject_reason = 'bad-txns-inputs-duplicate'
     expect_disconnect = True
 
     def get_tx(self):
         tx = CTransaction()
         tx.vin.append(self.valid_txin)
         tx.vin.append(self.valid_txin)
         tx.vout.append(CTxOut(1, basic_p2sh))
         tx.calc_sha256()
         return tx
 
 
 class NonexistentInput(BadTxTemplate):
     # Added as an orphan tx.
     reject_reason = None
     expect_disconnect = False
 
     def get_tx(self):
         tx = CTransaction()
         tx.vin.append(
             CTxIn(
                 COutPoint(
                     self.spend_tx.sha256 +
                     1,
                     0),
                 b"",
                 0xffffffff))
         tx.vin.append(self.valid_txin)
         tx.vout.append(CTxOut(1, basic_p2sh))
         tx.calc_sha256()
         return tx
 
 
 class SpendTooMuch(BadTxTemplate):
     reject_reason = 'bad-txns-in-belowout'
     expect_disconnect = True
 
     def get_tx(self):
         return create_tx_with_script(
             self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1))
 
 
 class CreateNegative(BadTxTemplate):
     reject_reason = 'bad-txns-vout-negative'
     expect_disconnect = True
 
     def get_tx(self):
         return create_tx_with_script(self.spend_tx, 0, amount=-1)
 
 
 class CreateTooLarge(BadTxTemplate):
     reject_reason = 'bad-txns-vout-toolarge'
     expect_disconnect = True
 
     def get_tx(self):
         return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1)
 
 
 class CreateSumTooLarge(BadTxTemplate):
     reject_reason = 'bad-txns-txouttotal-toolarge'
     expect_disconnect = True
 
     def get_tx(self):
         tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY)
         tx.vout = [tx.vout[0]] * 2
         tx.calc_sha256()
         return tx
 
 
 class InvalidOPIFConstruction(BadTxTemplate):
     reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)"
     expect_disconnect = True
     valid_in_block = True
 
     def get_tx(self):
         return create_tx_with_script(
             self.spend_tx, 0, script_sig=b'\x64' * 35,
             amount=(self.spend_avail // 2))
 
 
 def getDisabledOpcodeTemplate(opcode):
     """ Creates disabled opcode tx template class"""
 
     def get_tx(self):
         tx = CTransaction()
         vin = self.valid_txin
         vin.scriptSig = CScript([opcode])
         tx.vin.append(vin)
         tx.vout.append(CTxOut(1, basic_p2sh))
         pad_tx(tx)
         tx.calc_sha256()
         return tx
 
     return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), {
         'reject_reason': "disabled opcode",
         'expect_disconnect': True,
         'get_tx': get_tx,
         'valid_in_block': True
     })
 
 
 # Disabled opcode tx templates (CVE-2010-5137)
 DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [
     OP_INVERT,
     OP_2MUL,
     OP_2DIV,
     OP_MUL,
     OP_LSHIFT,
     OP_RSHIFT]]
 
 
 def iter_all_templates():
     """Iterate through all bad transaction template types."""
     return BadTxTemplate.__subclasses__()
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
index c7b506c63..2bfe71277 100755
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -1,235 +1,224 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """An example functional test
 
 The module-level docstring should include a high-level description of
 what the test is doing. It's the first thing people see when they open
 the file and should give the reader information about *what* the test
 is testing and *how* it's being tested.
 """
 # Imports should be in PEP8 ordering (std library first, then third party
 # libraries then local imports).
 from collections import defaultdict
 
 # Avoid wildcard * imports if possible
-from test_framework.blocktools import (create_block, create_coinbase)
-from test_framework.messages import (
-    CInv,
-    MSG_BLOCK,
-    msg_block,
-    msg_getdata
-)
-from test_framework.p2p import (
-    P2PInterface,
-    p2p_lock,
-)
+from test_framework.blocktools import create_block, create_coinbase
+from test_framework.messages import MSG_BLOCK, CInv, msg_block, msg_getdata
+from test_framework.p2p import P2PInterface, p2p_lock
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_equal,
-    connect_nodes,
-)
+from test_framework.util import assert_equal, connect_nodes
 
 # P2PInterface is a class containing callbacks to be executed when a P2P
 # message is received from the node-under-test. Subclass P2PInterface and
 # override the on_*() methods if you need custom behaviour.
 
 
 class BaseNode(P2PInterface):
     def __init__(self):
         """Initialize the P2PInterface
 
         Used to initialize custom properties for the Node that aren't
         included by default in the base class. Be aware that the P2PInterface
         base class already stores a counter for each P2P message type and the
         last received message of each type, which should be sufficient for the
         needs of most tests.
 
         Call super().__init__() first for standard initialization and then
         initialize custom properties."""
         super().__init__()
         # Stores a dictionary of all blocks received
         self.block_receive_map = defaultdict(int)
 
     def on_block(self, message):
         """Override the standard on_block callback
 
         Store the hash of a received block in the dictionary."""
         message.block.calc_sha256()
         self.block_receive_map[message.block.sha256] += 1
 
     def on_inv(self, message):
         """Override the standard on_inv callback"""
         pass
 
 
 def custom_function():
     """Do some custom behaviour
 
     If this function is more generally useful for other tests, consider
     moving it to a module in test_framework."""
     # self.log.info("running custom_function")  # Oops! Can't run self.log
     # outside the BitcoinTestFramework
     pass
 
 
 class ExampleTest(BitcoinTestFramework):
     # Each functional test is a subclass of the BitcoinTestFramework class.
 
     # Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network()
     # and setup_nodes() methods to customize the test setup as required.
 
     def set_test_params(self):
         """Override test parameters for your individual test.
 
         This method must be overridden and num_nodes must be explicitly set."""
         self.setup_clean_chain = True
         self.num_nodes = 3
         # Use self.extra_args to change command-line arguments for the nodes
         self.extra_args = [[], ["-logips"], []]
 
         # self.log.info("I've finished set_test_params")  # Oops! Can't run
         # self.log before run_test()
 
     # Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present.
     # This test uses generate which requires wallet to be compiled
     def skip_test_if_missing_module(self):
         self.skip_if_no_wallet()
 
     # Use add_options() to add specific command-line options for your test.
     # In practice this is not used very much, since the tests are mostly written
     # to be run in automated environments without command-line options.
     # def add_options()
     #     pass
 
     # Use setup_chain() to customize the node data directories. In practice
     # this is not used very much since the default behaviour is almost always
     # fine
     # def setup_chain():
     #     pass
 
     def setup_network(self):
         """Setup the test network topology
 
         Often you won't need to override this, since the standard network topology
         (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
 
         If you do override this method, remember to start the nodes, assign
         them to self.nodes, connect them and then sync."""
 
         self.setup_nodes()
 
         # In this test, we're not connecting node2 to node0 or node1. Calls to
         # sync_all() should not include node2, since we're not expecting it to
         # sync.
         connect_nodes(self.nodes[0], self.nodes[1])
         self.sync_all(self.nodes[0:2])
 
     # Use setup_nodes() to customize the node start behaviour (for example if
     # you don't want to start all nodes at the start of the test).
     # def setup_nodes():
     #     pass
 
     def custom_method(self):
         """Do some custom behaviour for this test
 
         Define it in a method here because you're going to use it repeatedly.
         If you think it's useful in general, consider moving it to the base
         BitcoinTestFramework class so other tests can use it."""
 
         self.log.info("Running custom_method")
 
     def run_test(self):
         """Main test logic"""
 
         # Creating a P2P connection waits for a verack to make sure the
         # connection is fully up
         self.nodes[0].add_p2p_connection(BaseNode())
 
         # Generating a block on one of the nodes will get us out of IBD
         blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
         self.sync_all(self.nodes[0:2])
 
         # Notice above how we called an RPC by calling a method with the same
         # name on the node object. Notice also how we used a keyword argument
         # to specify a named RPC argument. Neither of those is defined on the
         # node object. Instead there's some __getattr__() magic going on under
         # the covers to dispatch unrecognised attribute calls to the RPC
         # interface.
 
         # Logs are nice. Do plenty of them. They can be used in place of comments for
         # breaking the test into sub-sections.
         self.log.info("Starting test!")
 
         self.log.info("Calling a custom function")
         custom_function()
 
         self.log.info("Calling a custom method")
         self.custom_method()
 
         self.log.info("Create some blocks")
         self.tip = int(self.nodes[0].getbestblockhash(), 16)
         self.block_time = self.nodes[0].getblock(
             self.nodes[0].getbestblockhash())['time'] + 1
 
         height = self.nodes[0].getblockcount()
 
         for _ in range(10):
             # Use the blocktools functionality to manually build a block.
             # Calling the generate() rpc is easier, but this allows us to exactly
             # control the blocks and transactions.
             block = create_block(
                 self.tip, create_coinbase(
                     height + 1), self.block_time)
             block.solve()
             block_message = msg_block(block)
             # Send message is used to send a P2P message to the node over our
             # P2PInterface
             self.nodes[0].p2p.send_message(block_message)
             self.tip = block.sha256
             blocks.append(self.tip)
             self.block_time += 1
             height += 1
 
         self.log.info(
             "Wait for node1 to reach current tip (height 11) using RPC")
         self.nodes[1].waitforblockheight(11)
 
         self.log.info("Connect node2 and node1")
         connect_nodes(self.nodes[1], self.nodes[2])
 
         self.log.info("Wait for node2 to receive all the blocks from node1")
         self.sync_all()
 
         self.log.info("Add P2P connection to node2")
         self.nodes[0].disconnect_p2ps()
 
         self.nodes[2].add_p2p_connection(BaseNode())
 
         self.log.info("Test that node2 propagates all the blocks to us")
 
         getdata_request = msg_getdata()
         for block in blocks:
             getdata_request.inv.append(CInv(MSG_BLOCK, block))
         self.nodes[2].p2p.send_message(getdata_request)
 
         # wait_until() will loop until a predicate condition is met. Use it to test properties of the
         # P2PInterface objects.
         self.nodes[2].p2p.wait_until(
             lambda: sorted(blocks) == sorted(list(
                 self.nodes[2].p2p.block_receive_map.keys())),
             timeout=5)
 
         self.log.info("Check that each block was received only once")
         # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
         # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
         # and synchronization issues. Note p2p.wait_until() acquires this
         # global lock internally when testing the predicate.
         with p2p_lock:
             for block in self.nodes[2].p2p.block_receive_map.values():
                 assert_equal(block, 1)
 
 
 if __name__ == '__main__':
     ExampleTest().main()
diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py
index 2235adb52..890a92c34 100644
--- a/test/functional/test_framework/address.py
+++ b/test/functional/test_framework/address.py
@@ -1,133 +1,133 @@
 #!/usr/bin/env python3
 # Copyright (c) 2016-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Encode and decode BASE58, P2PKH and P2SH addresses."""
 
 import unittest
 
 from .script import CScript, hash160, hash256
-from .util import hex_str_to_bytes, assert_equal
+from .util import assert_equal, hex_str_to_bytes
 
 ADDRESS_ECREG_UNSPENDABLE = 'ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt'
 ADDRESS_ECREG_UNSPENDABLE_DESCRIPTOR = 'addr(ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt)#u6xx93xc'
 
 chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
 
 
 def byte_to_base58(b, version):
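     # Base58Check encode: prepend the version byte, append the first four
     # bytes of hash256(version + payload) as a checksum, then base58-encode.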
     result = ''
     str = b.hex()
     str = chr(version).encode('latin-1').hex() + str
     checksum = hash256(hex_str_to_bytes(str)).hex()
     str += checksum[:8]
     value = int('0x' + str, 0)
     while value > 0:
         result = chars[value % 58] + result
         value //= 58
     while (str[:2] == '00'):
         result = chars[0] + result
         str = str[2:]
     return result
 
 
 def base58_to_byte(s, verify_checksum=True):
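     # Inverse of byte_to_base58: decode the base58 integer, restore one
     # leading zero byte per leading '1', optionally verify the 4-byte
     # checksum, and return (payload, version).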
     if not s:
         return b''
     n = 0
     for c in s:
         n *= 58
         assert c in chars
         digit = chars.index(c)
         n += digit
     h = '{:x}'.format(n)
     if len(h) % 2:
         h = '0' + h
     res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
     pad = 0
     for c in s:
         if c == chars[0]:
             pad += 1
         else:
             break
     res = b'\x00' * pad + res
     if verify_checksum:
         assert_equal(hash256(res[:-4])[:4], res[-4:])
 
     return res[1:-4], int(res[0])
 
 
 def keyhash_to_p2pkh(hash, main=False):
     assert (len(hash) == 20)
     version = 0 if main else 111
     return byte_to_base58(hash, version)
 
 
 def scripthash_to_p2sh(hash, main=False):
     assert (len(hash) == 20)
     version = 5 if main else 196
     return byte_to_base58(hash, version)
 
 
 def key_to_p2pkh(key, main=False):
     key = check_key(key)
     return keyhash_to_p2pkh(hash160(key), main)
 
 
 def script_to_p2sh(script, main=False):
     script = check_script(script)
     return scripthash_to_p2sh(hash160(script), main)
 
 
 def check_key(key):
     if (isinstance(key, str)):
         key = hex_str_to_bytes(key)  # Assuming this is hex string
     if (isinstance(key, bytes) and (len(key) == 33 or len(key) == 65)):
         return key
     assert False
 
 
 def check_script(script):
     if (isinstance(script, str)):
         script = hex_str_to_bytes(script)  # Assuming this is hex string
     if (isinstance(script, bytes) or isinstance(script, CScript)):
         return script
     assert False
 
 
 class TestFrameworkScript(unittest.TestCase):
     def test_base58encodedecode(self):
         def check_base58(data, version):
             self.assertEqual(
                 base58_to_byte(byte_to_base58(data, version)),
                 (data, version))
 
         check_base58(
             b'\x1f\x8e\xa1p*{\xd4\x94\x1b\xca\tA\xb8R\xc4\xbb\xfe\xdb.\x05',
             111)
         check_base58(
             b':\x0b\x05\xf4\xd7\xf6l;\xa7\x00\x9fE50)l\x84\\\xc9\xcf', 111)
         check_base58(
             b'A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a',
             111)
         check_base58(
             b'\0A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a',
             111)
         check_base58(
             b'\0\0A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a',
             111)
         check_base58(
             b'\0\0\0A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a',
             111)
         check_base58(
             b'\x1f\x8e\xa1p*{\xd4\x94\x1b\xca\tA\xb8R\xc4\xbb\xfe\xdb.\x05', 0)
         check_base58(
             b':\x0b\x05\xf4\xd7\xf6l;\xa7\x00\x9fE50)l\x84\\\xc9\xcf', 0)
         check_base58(
             b'A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a', 0)
         check_base58(
             b'\0A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a', 0)
         check_base58(
             b'\0\0A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a', 0)
         check_base58(
             b'\0\0\0A\xc1\xea\xf1\x11\x80%Y\xba\xd6\x1b`\xd6+\x1f\x89|c\x92\x8a', 0)
diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py
index 297ed9650..03304617d 100644
--- a/test/functional/test_framework/authproxy.py
+++ b/test/functional/test_framework/authproxy.py
@@ -1,231 +1,231 @@
 #!/usr/bin/env python3
 # Copyright (c) 2011 Jeff Garzik
 #
 # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
 #
 # Copyright (c) 2007 Jan-Klaas Kollhof
 #
 # This file is part of jsonrpc.
 #
 # jsonrpc is free software; you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation; either version 2.1 of the License, or
 # (at your option) any later version.
 #
 # This software is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with this software; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """HTTP proxy for opening RPC connection to bitcoind.
 
 AuthServiceProxy has the following improvements over python-jsonrpc's
 ServiceProxy class:
 
 - HTTP connections persist for the life of the AuthServiceProxy object
   (if server supports HTTP/1.1)
 - sends protocol 'version', per JSON-RPC 1.1
 - sends proper, incrementing 'id'
 - sends Basic HTTP authentication headers
 - parses all JSON numbers that look like floats as Decimal
 - uses standard Python json lib
 """
 
 import base64
 import decimal
-from http import HTTPStatus
 import http.client
 import json
 import logging
 import os
 import socket
 import time
 import urllib.parse
+from http import HTTPStatus
 
 HTTP_TIMEOUT = 30
 USER_AGENT = "AuthServiceProxy/0.1"
 
 log = logging.getLogger("BitcoinRPC")
 
 
 class JSONRPCException(Exception):
     def __init__(self, rpc_error, http_status=None):
         try:
             errmsg = '{} ({})'.format(rpc_error['message'], rpc_error['code'])
         except (KeyError, TypeError):
             errmsg = ''
         super().__init__(errmsg)
         self.error = rpc_error
         self.http_status = http_status
 
 
 def EncodeDecimal(o):
     if isinstance(o, decimal.Decimal):
         return str(o)
     raise TypeError(repr(o) + " is not JSON serializable")
 
 
 class AuthServiceProxy():
     __id_count = 0
 
     # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
     def __init__(self, service_url, service_name=None,
                  timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
         self.__service_url = service_url
         self._service_name = service_name
         self.ensure_ascii = ensure_ascii  # can be toggled on the fly by tests
         self.__url = urllib.parse.urlparse(service_url)
         user = None if self.__url.username is None else self.__url.username.encode(
             'utf8')
         passwd = None if self.__url.password is None else self.__url.password.encode(
             'utf8')
         authpair = user + b':' + passwd
         self.__auth_header = b'Basic ' + base64.b64encode(authpair)
         self.timeout = timeout
         self._set_conn(connection)
 
     def __getattr__(self, name):
         if name.startswith('__') and name.endswith('__'):
             # Python internal stuff
             raise AttributeError
         if self._service_name is not None:
             name = "{}.{}".format(self._service_name, name)
         return AuthServiceProxy(
             self.__service_url, name, connection=self.__conn)
 
     def _request(self, method, path, postdata):
         '''
         Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
         This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
         '''
         headers = {'Host': self.__url.hostname,
                    'User-Agent': USER_AGENT,
                    'Authorization': self.__auth_header,
                    'Content-type': 'application/json'}
         if os.name == 'nt':
             # Windows somehow does not like to re-use connections
             # TODO: Find out why the connection would disconnect occasionally
             # and make it reusable on Windows
             # Avoid "ConnectionAbortedError: [WinError 10053] An established
             # connection was aborted by the software in your host machine"
             self._set_conn()
         try:
             self.__conn.request(method, path, postdata, headers)
             return self._get_response()
         except (BrokenPipeError, ConnectionResetError):
             # Python 3.5+ raises BrokenPipeError when the connection was reset
             # ConnectionResetError happens on FreeBSD
             self.__conn.close()
             self.__conn.request(method, path, postdata, headers)
             return self._get_response()
         except OSError as e:
             retry = (
                 '[WinError 10053] An established connection was aborted by the software in your host machine' in str(e))
             # Workaround for a bug on macOS. See
             # https://bugs.python.org/issue33450
             retry = retry or (
                 '[Errno 41] Protocol wrong type for socket' in str(e))
             if retry:
                 self.__conn.close()
                 self.__conn.request(method, path, postdata, headers)
                 return self._get_response()
             else:
                 raise
 
     def get_request(self, *args, **argsn):
         AuthServiceProxy.__id_count += 1
 
         log.debug("-{}-> {} {}".format(
             AuthServiceProxy.__id_count,
             self._service_name,
             json.dumps(
                 args or argsn,
                 default=EncodeDecimal,
                 ensure_ascii=self.ensure_ascii),
         ))
         if args and argsn:
             raise ValueError(
                 'Cannot handle both named and positional arguments')
         return {'version': '1.1',
                 'method': self._service_name,
                 'params': args or argsn,
                 'id': AuthServiceProxy.__id_count}
 
     def __call__(self, *args, **argsn):
         postdata = json.dumps(self.get_request(
             *args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
         response, status = self._request(
             'POST', self.__url.path, postdata.encode('utf-8'))
         if response['error'] is not None:
             raise JSONRPCException(response['error'], status)
         elif 'result' not in response:
             raise JSONRPCException({
                 'code': -343, 'message': 'missing JSON-RPC result'}, status)
         elif status != HTTPStatus.OK:
             raise JSONRPCException({
                 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
         else:
             return response['result']
 
     def batch(self, rpc_call_list):
         postdata = json.dumps(
             list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
         log.debug("--> " + postdata)
         response, status = self._request(
             'POST', self.__url.path, postdata.encode('utf-8'))
         if status != HTTPStatus.OK:
             raise JSONRPCException({
                 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
         return response
 
     def _get_response(self):
         req_start_time = time.time()
         try:
             http_response = self.__conn.getresponse()
         except socket.timeout:
             raise JSONRPCException({
                 'code': -344,
                 'message': '{!r} RPC took longer than {} seconds. Consider '
                            'using larger timeout for calls that take '
                            'longer to return.'.format(self._service_name,
                                                       self.__conn.timeout)})
         if http_response is None:
             raise JSONRPCException({
                 'code': -342, 'message': 'missing HTTP response from server'})
 
         content_type = http_response.getheader('Content-Type')
         if content_type != 'application/json':
             raise JSONRPCException(
                 {'code': -342,
                  'message': 'non-JSON HTTP response with \'{} {}\' from server'.format(
                      http_response.status, http_response.reason)},
                 http_response.status)
 
         responsedata = http_response.read().decode('utf8')
         response = json.loads(responsedata, parse_float=decimal.Decimal)
         elapsed = time.time() - req_start_time
         if "error" in response and response["error"] is None:
             log.debug("<-{}- [{:.6f}] {}".format(response["id"], elapsed, json.dumps(
                 response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
         else:
             log.debug("<-- [{:.6f}] {}".format(elapsed, responsedata))
         return response, http_response.status
 
     def __truediv__(self, relative_uri):
         return AuthServiceProxy("{}/{}".format(self.__service_url,
                                                relative_uri), self._service_name, connection=self.__conn)
 
     def _set_conn(self, connection=None):
         port = 80 if self.__url.port is None else self.__url.port
         if connection:
             self.__conn = connection
             self.timeout = connection.timeout
         elif self.__url.scheme == 'https':
             self.__conn = http.client.HTTPSConnection(
                 self.__url.hostname, port, timeout=self.timeout)
         else:
             self.__conn = http.client.HTTPConnection(
                 self.__url.hostname, port, timeout=self.timeout)
diff --git a/test/functional/test_framework/avatools.py b/test/functional/test_framework/avatools.py
index 698b6da6d..c1bf00049 100644
--- a/test/functional/test_framework/avatools.py
+++ b/test/functional/test_framework/avatools.py
@@ -1,275 +1,271 @@
 #!/usr/bin/env python3
 # Copyright (c) 2021 The Bitcoin ABC developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Utilities for avalanche tests."""
 
 import struct
-from typing import Any, Optional, List, Dict
+from typing import Any, Dict, List, Optional
 
 from .authproxy import JSONRPCException
 from .key import ECKey
 from .messages import (
+    NODE_AVALANCHE,
+    NODE_NETWORK,
     AvalancheDelegation,
     AvalancheProof,
     AvalancheResponse,
     CInv,
     CTransaction,
     FromHex,
+    TCPAvalancheResponse,
+    ToHex,
     hash256,
     msg_avahello,
     msg_avapoll,
     msg_tcpavaresponse,
-    NODE_AVALANCHE,
-    NODE_NETWORK,
-    TCPAvalancheResponse,
-    ToHex,
 )
 from .p2p import P2PInterface, p2p_lock
 from .test_node import TestNode
-from .util import (
-    assert_equal,
-    satoshi_round,
-    wait_until_helper,
-)
+from .util import assert_equal, satoshi_round, wait_until_helper
 from .wallet_util import bytes_to_wif
 
 
 def create_coinbase_stakes(
         node: TestNode,
         blockhashes: List[str],
         priv_key: str,
         amount: Optional[str] = None) -> List[Dict[str, Any]]:
     """Returns a list of dictionaries representing stakes, in a format
     compatible with the buildavalancheproof RPC, using only coinbase
     transactions.
 
     :param node: Test node used to get the block and coinbase data.
     :param blockhashes: List of block hashes, whose coinbase tx will be used
         as a stake.
     :param priv_key: Private key controlling the coinbase UTXO
     :param amount: If specified, this overwrites the amount information
         in the coinbase dicts.
     """
     blocks = [node.getblock(h, 2) for h in blockhashes]
     coinbases = [
         {
             'height': b['height'],
             'txid': b['tx'][0]['txid'],
             'n': 0,
             'value': b['tx'][0]['vout'][0]['value'],
         } for b in blocks
     ]
 
     return [{
         'txid': coinbase['txid'],
         'vout': coinbase['n'],
         'amount': amount or coinbase['value'],
         'height': coinbase['height'],
         'iscoinbase': True,
         'privatekey': priv_key,
     } for coinbase in coinbases]
 
 
 def get_utxos_in_blocks(node: TestNode, blockhashes: List[str]) -> List[Dict]:
     """Return all UTXOs in the specified list of blocks.
     """
     utxos = filter(
         lambda u: node.gettransaction(u["txid"])["blockhash"] in blockhashes,
         node.listunspent())
     return list(utxos)
 
 
 def create_stakes(
         node: TestNode, blockhashes: List[str], count: int
 ) -> List[Dict[str, Any]]:
     """
     Create a list of stakes by splitting each UTXO from a specified list
     of blocks into 10 new coins.
 
     This function can generate more valid stakes than `create_coinbase_stakes`
     does, because on regtest the halving happens every 150 blocks, so the
     coinbase amount falls below the dust threshold after only 900 blocks.
 
     :param node: Test node used to generate blocks and send transactions
     :param blockhashes: List of block hashes whose UTXOs will be split.
     :param count: Number of stakes to return.
     """
     assert 10 * len(blockhashes) >= count
     utxos = get_utxos_in_blocks(node, blockhashes)
 
     addresses = [node.getnewaddress() for _ in range(10)]
     private_keys = {addr: node.dumpprivkey(addr) for addr in addresses}
 
     for u in utxos:
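         # Split this UTXO into 10 equal outputs, then deduct the fee from
         # the first output so the transaction stays valid.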
         inputs = [{"txid": u["txid"], "vout": u["vout"]}]
         outputs = {
             addr: satoshi_round(u['amount'] / 10) for addr in addresses}
         raw_tx = node.createrawtransaction(inputs, outputs)
         ctx = FromHex(CTransaction(), raw_tx)
         ctx.vout[0].nValue -= node.calculate_fee(ctx)
         signed_tx = node.signrawtransactionwithwallet(ToHex(ctx))["hex"]
         node.sendrawtransaction(signed_tx)
 
     # confirm the transactions
     new_blocks = []
     while node.getmempoolinfo()['size'] > 0:
         new_blocks += node.generate(1)
 
     utxos = get_utxos_in_blocks(node, new_blocks)
     stakes = []
     # cache block heights
     heights = {}
     for utxo in utxos[:count]:
         blockhash = node.gettransaction(utxo["txid"])["blockhash"]
         if blockhash not in heights:
             heights[blockhash] = node.getblock(blockhash, 1)["height"]
         stakes.append({
             'txid': utxo['txid'],
             'vout': utxo['vout'],
             'amount': utxo['amount'],
             'iscoinbase': utxo['label'] == "coinbase",
             'height': heights[blockhash],
             'privatekey': private_keys[utxo["address"]],
         })
 
     return stakes
 
 
 def get_proof_ids(node):
     return [FromHex(AvalancheProof(), peer['proof']
                     ).proofid for peer in node.getavalanchepeerinfo()]
 
 
 def wait_for_proof(node, proofid_hex, timeout=60, expect_orphan=None):
     """
     Wait for the proof to be known by the node. If expect_orphan is set,
     assert that the proof's orphan state matches it; otherwise the orphan
     state is ignored.
     """
     def proof_found():
         try:
             wait_for_proof.is_orphan = node.getrawavalancheproof(proofid_hex)[
                 "orphan"]
             return True
         except JSONRPCException:
             return False
     wait_until_helper(proof_found, timeout=timeout)
 
     if expect_orphan is not None:
         assert_equal(expect_orphan, wait_for_proof.is_orphan)
 
 
 class AvaP2PInterface(P2PInterface):
     """P2PInterface with avalanche capabilities"""
 
     def __init__(self):
         self.round = 0
         self.avahello = None
         self.avaresponses = []
         self.avapolls = []
         self.nodeid: Optional[int] = None
         super().__init__()
 
     def peer_connect(self, *args, **kwargs):
         create_conn = super().peer_connect(*args, **kwargs)
 
         # Save the nonce and extra entropy so they can be reused later.
         self.local_nonce = self.on_connection_send_msg.nNonce
         self.local_extra_entropy = self.on_connection_send_msg.nExtraEntropy
 
         return create_conn
 
     def on_version(self, message):
         super().on_version(message)
 
         # Save the nonce and extra entropy so they can be reused later.
         self.remote_nonce = message.nNonce
         self.remote_extra_entropy = message.nExtraEntropy
 
     def on_avaresponse(self, message):
         self.avaresponses.append(message.response)
 
     def on_avapoll(self, message):
         self.avapolls.append(message.poll)
 
     def on_avahello(self, message):
         assert self.avahello is None
         self.avahello = message
 
     def send_avaresponse(self, round, votes, privkey):
         response = AvalancheResponse(round, 0, votes)
         sig = privkey.sign_schnorr(response.get_hash())
         msg = msg_tcpavaresponse()
         msg.response = TCPAvalancheResponse(response, sig)
         self.send_message(msg)
 
     def wait_for_avaresponse(self, timeout=5):
         self.wait_until(
             lambda: len(self.avaresponses) > 0,
             timeout=timeout)
 
         with p2p_lock:
             return self.avaresponses.pop(0)
 
     def send_poll(self, hashes):
         msg = msg_avapoll()
         msg.poll.round = self.round
         self.round += 1
         for h in hashes:
             msg.poll.invs.append(CInv(2, h))
         self.send_message(msg)
 
     def get_avapoll_if_available(self):
         with p2p_lock:
             return self.avapolls.pop(0) if len(self.avapolls) > 0 else None
 
     def wait_for_avahello(self, timeout=5):
         self.wait_until(
             lambda: self.avahello is not None,
             timeout=timeout)
 
         with p2p_lock:
             return self.avahello
 
     def send_avahello(self, delegation_hex: str, delegated_privkey: ECKey):
         delegation = FromHex(AvalancheDelegation(), delegation_hex)
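         # The sighash commits to the delegation id plus both sides' nonces
         # and extra entropy, with our own ("local") values first. The node
         # verifies against the mirrored order; see
         # AvalancheHello.get_sighash in messages.py.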
         local_sighash = hash256(
             delegation.getid() +
             struct.pack("<QQQQ", self.local_nonce, self.remote_nonce,
                         self.local_extra_entropy, self.remote_extra_entropy))
 
         msg = msg_avahello()
         msg.hello.delegation = delegation
         msg.hello.sig = delegated_privkey.sign_schnorr(local_sighash)
         self.send_message(msg)
 
         return delegation.proofid
 
 
 def get_ava_p2p_interface(
         node: TestNode,
         services=NODE_NETWORK | NODE_AVALANCHE) -> AvaP2PInterface:
     """Build and return an AvaP2PInterface connected to the specified
     TestNode.
     """
     n = AvaP2PInterface()
     node.add_p2p_connection(
         n, services=services)
     n.wait_for_verack()
     n.nodeid = node.getpeerinfo()[-1]['id']
 
     return n
 
 
 def gen_proof(node, coinbase_utxos=1):
     blockhashes = node.generate(coinbase_utxos)
 
     privkey = ECKey()
     privkey.generate()
 
     stakes = create_coinbase_stakes(
         node, blockhashes, node.get_deterministic_priv_key().key)
     proof_hex = node.buildavalancheproof(
         42, 2000000000, bytes_to_wif(privkey.get_bytes()), stakes)
 
     return privkey, FromHex(AvalancheProof(), proof_hex)
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index 47dc3ff10..6bad61fae 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -1,238 +1,238 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Utilities for manipulating blocks and transactions."""
 
 import unittest
 
-from .script import (
-    CScript,
-    CScriptNum,
-    CScriptOp,
-    OP_1,
-    OP_CHECKSIG,
-    OP_DUP,
-    OP_EQUALVERIFY,
-    OP_HASH160,
-    OP_RETURN,
-    OP_TRUE,
-)
 from .messages import (
-    CBlock,
     COIN,
     XEC,
+    CBlock,
     COutPoint,
     CTransaction,
     CTxIn,
     CTxOut,
     FromHex,
     ToHex,
 )
+from .script import (
+    OP_1,
+    OP_CHECKSIG,
+    OP_DUP,
+    OP_EQUALVERIFY,
+    OP_HASH160,
+    OP_RETURN,
+    OP_TRUE,
+    CScript,
+    CScriptNum,
+    CScriptOp,
+)
 from .txtools import pad_tx
 from .util import assert_equal, satoshi_round
 
 # Genesis block time (regtest)
 TIME_GENESIS_BLOCK = 1296688602
 
 
 def create_block(hashprev, coinbase, ntime=None, *, version=1):
     """Create a block (with regtest difficulty)."""
     block = CBlock()
     block.nVersion = version
     if ntime is None:
         import time
         block.nTime = int(time.time() + 600)
     else:
         block.nTime = ntime
     block.hashPrevBlock = hashprev
     # difficulty retargeting is disabled in REGTEST chainparams
     block.nBits = 0x207fffff
     block.vtx.append(coinbase)
     block.hashMerkleRoot = block.calc_merkle_root()
     block.calc_sha256()
     return block
 
 
 def make_conform_to_ctor(block):
     for tx in block.vtx:
         tx.rehash()
     block.vtx = [block.vtx[0]] + \
         sorted(block.vtx[1:], key=lambda tx: tx.get_id())
 
 
 def script_BIP34_coinbase_height(height):
     if height <= 16:
         res = CScriptOp.encode_op_n(height)
         # Append dummy to increase scriptSig size above 2
         # (see bad-cb-length consensus rule)
         return CScript([res, OP_1])
     return CScript([CScriptNum(height)])
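

 # Editor's illustrative sketch (not in the original patch): the
 # _example_bip34_heights helper below is hypothetical and only
 # demonstrates the two encodings produced above -- a single opcode plus
 # a dummy OP_1 push for small heights, and a minimal CScriptNum push
 # otherwise.
 def _example_bip34_heights():
     # Heights up to 16 become one opcode plus the dummy OP_1, i.e. a
     # 2-byte scriptSig (the minimum size allowed for a coinbase).
     assert len(script_BIP34_coinbase_height(16)) == 2
     # Larger heights are a minimal CScriptNum push.
     assert script_BIP34_coinbase_height(200) == CScript([CScriptNum(200)])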
 
 
 def create_coinbase(height, pubkey=None):
     """Create a coinbase transaction, assuming no miner fees.
 
     If pubkey is passed in, the coinbase output will be a P2PK output;
     otherwise an anyone-can-spend output."""
     coinbase = CTransaction()
     coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
                               script_BIP34_coinbase_height(height),
                               0xffffffff))
     coinbaseoutput = CTxOut()
     coinbaseoutput.nValue = 50 * COIN
     halvings = height // 150  # regtest
     coinbaseoutput.nValue >>= halvings
     if pubkey is not None:
         coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
     else:
         coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
     coinbase.vout = [coinbaseoutput]
 
     # Make sure the coinbase is at least 100 bytes
     pad_tx(coinbase)
 
     coinbase.calc_sha256()
     return coinbase
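

 # Editor's illustrative sketch (not in the original patch): the
 # _example_regtest_subsidy helper is hypothetical; it just restates the
 # halving arithmetic used in create_coinbase above.
 def _example_regtest_subsidy(height):
     # On regtest the subsidy halves every 150 blocks: heights 0-149 pay
     # 50 * COIN, heights 150-299 pay 25 * COIN, and so on.
     return (50 * COIN) >> (height // 150)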
 
 
 def create_tx_with_script(prevtx, n, script_sig=b"", *,
                           amount, script_pub_key=CScript()):
     """Return one-input, one-output transaction object
        spending the prevtx's n-th output with the given amount.
 
        Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
     """
     tx = CTransaction()
     assert n < len(prevtx.vout)
     tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
     tx.vout.append(CTxOut(amount, script_pub_key))
     pad_tx(tx)
     tx.calc_sha256()
     return tx
 
 
 def create_transaction(node, txid, to_address, *, amount):
     """ Return signed transaction spending the first output of the
         input txid. Note that the node must be able to sign for the
         output that is being spent, and the node must not be running
         multiple wallets.
     """
     raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
     tx = FromHex(CTransaction(), raw_tx)
     return tx
 
 
 def create_raw_transaction(node, txid, to_address, *, amount):
     """ Return raw signed transaction spending the first output of the
         input txid. Note that the node must be able to sign for the
         output that is being spent, and the node must not be running
         multiple wallets.
     """
     rawtx = node.createrawtransaction(
         inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
     signresult = node.signrawtransactionwithwallet(rawtx)
     assert_equal(signresult["complete"], True)
     return signresult['hex']
 
 
 def get_legacy_sigopcount_block(block, accurate=True):
     count = 0
     for tx in block.vtx:
         count += get_legacy_sigopcount_tx(tx, accurate)
     return count
 
 
 def get_legacy_sigopcount_tx(tx, accurate=True):
     count = 0
     for i in tx.vout:
         count += i.scriptPubKey.GetSigOpCount(accurate)
     for j in tx.vin:
         # scriptSig might be of type bytes, so convert to CScript for the
         # moment
         count += CScript(j.scriptSig).GetSigOpCount(accurate)
     return count
 
 
 def create_confirmed_utxos(node, count, age=101):
     """
     Helper to create at least "count" utxos
     """
     to_generate = int(0.5 * count) + age
     while to_generate > 0:
         node.generate(min(25, to_generate))
         to_generate -= 25
     utxos = node.listunspent()
     iterations = count - len(utxos)
     addr1 = node.getnewaddress()
     addr2 = node.getnewaddress()
     if iterations <= 0:
         return utxos
     for i in range(iterations):
         t = utxos.pop()
         inputs = []
         inputs.append({"txid": t["txid"], "vout": t["vout"]})
         outputs = {}
         outputs[addr1] = satoshi_round(t['amount'] / 2)
         outputs[addr2] = satoshi_round(t['amount'] / 2)
         raw_tx = node.createrawtransaction(inputs, outputs)
         ctx = FromHex(CTransaction(), raw_tx)
         fee = node.calculate_fee(ctx) // 2
         ctx.vout[0].nValue -= fee
         # Due to possible truncation, we go ahead and take another satoshi in
         # fees to ensure the transaction gets through
         ctx.vout[1].nValue -= fee + 1
         signed_tx = node.signrawtransactionwithwallet(ToHex(ctx))["hex"]
         node.sendrawtransaction(signed_tx)
 
     while (node.getmempoolinfo()['size'] > 0):
         node.generate(1)
 
     utxos = node.listunspent()
     assert len(utxos) >= count
     return utxos
 
 
 def mine_big_block(node, utxos=None):
     # generate 66 kB transactions;
     # 14 of them get close to the 1MB block limit
     num = 14
     utxos = utxos if utxos is not None else []
     if len(utxos) < num:
         utxos.clear()
         utxos.extend(node.listunspent())
     send_big_transactions(node, utxos, num, 100)
     node.generate(1)
 
 
 def send_big_transactions(node, utxos, num, fee_multiplier):
     from .cashaddr import decode
     txids = []
     padding = "1" * 512
     addrHash = decode(node.getnewaddress())[2]
 
     for _ in range(num):
         ctx = CTransaction()
         utxo = utxos.pop()
         txid = int(utxo['txid'], 16)
         ctx.vin.append(CTxIn(COutPoint(txid, int(utxo["vout"])), b""))
         ctx.vout.append(
             CTxOut(int(satoshi_round(utxo['amount'] * XEC)),
                    CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG])))
         for _ in range(127):
             ctx.vout.append(CTxOut(0, CScript(
                 [OP_RETURN, bytes(padding, 'utf-8')])))
         # Create a proper fee for the transaction to be mined
         ctx.vout[0].nValue -= int(fee_multiplier * node.calculate_fee(ctx))
         signresult = node.signrawtransactionwithwallet(
             ToHex(ctx), None, "NONE|FORKID")
         txid = node.sendrawtransaction(signresult["hex"], 0)
         txids.append(txid)
     return txids
 
 
 class TestFrameworkBlockTools(unittest.TestCase):
     def test_create_coinbase(self):
         height = 20
         coinbase_tx = create_coinbase(height=height)
         assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), height)
diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py
index 82d995287..c9b2238d0 100644
--- a/test/functional/test_framework/coverage.py
+++ b/test/functional/test_framework/coverage.py
@@ -1,111 +1,110 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2016 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Utilities for doing coverage analysis on the RPC interface.
 
 Provides a way to track which RPC commands are exercised during
 testing.
 """
 
 import os
 
-
 REFERENCE_FILENAME = 'rpc_interface.txt'
 
 
 class AuthServiceProxyWrapper():
     """
     An object that wraps AuthServiceProxy to record specific RPC calls.
 
     """
 
     def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
         """
         Kwargs:
             auth_service_proxy_instance (AuthServiceProxy): the instance
                 being wrapped.
             coverage_logfile (str): if specified, write each service_name
                 out to a file when called.
 
         """
         self.auth_service_proxy_instance = auth_service_proxy_instance
         self.coverage_logfile = coverage_logfile
 
     def __getattr__(self, name):
         return_val = getattr(self.auth_service_proxy_instance, name)
         if not isinstance(return_val, type(self.auth_service_proxy_instance)):
             # If proxy getattr returned an unwrapped value, do the same here.
             return return_val
         return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
 
     def __call__(self, *args, **kwargs):
         """
         Delegates to AuthServiceProxy, then writes the particular RPC method
         called to a file.
 
         """
         return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
         self._log_call()
         return return_val
 
     def _log_call(self):
         rpc_method = self.auth_service_proxy_instance._service_name
 
         if self.coverage_logfile:
             with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
                 f.write("{}\n".format(rpc_method))
 
     def __truediv__(self, relative_uri):
         return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
                                        self.coverage_logfile)
 
     def get_request(self, *args, **kwargs):
         self._log_call()
         return self.auth_service_proxy_instance.get_request(*args, **kwargs)
 
 
 def get_filename(dirname, n_node):
     """
     Get a filename unique to the test process ID and node.
 
     This file will contain a list of RPC commands covered.
     """
     pid = str(os.getpid())
     return os.path.join(
         dirname, "coverage.pid{}.node{}.txt".format(pid, str(n_node)))
 
 
 def write_all_rpc_commands(dirname, node):
     """
     Write out a list of all RPC functions available in `bitcoin-cli` for
     coverage comparison. This will only happen once per coverage
     directory.
 
     Args:
         dirname (str): temporary test dir
         node (AuthServiceProxy): client
 
     Returns:
         bool: True if the RPC interface file was written.
 
     """
     filename = os.path.join(dirname, REFERENCE_FILENAME)
 
     if os.path.isfile(filename):
         return False
 
     help_output = node.help().split('\n')
     commands = set()
 
     for line in help_output:
         line = line.strip()
 
         # Ignore blanks and headers
         if line and not line.startswith('='):
             commands.add("{}\n".format(line.split()[0]))
 
     with open(filename, 'w', encoding='utf8') as f:
         f.writelines(list(commands))
 
     return True
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 7f4fe5f7f..6fec4aa7f 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -1,2089 +1,2087 @@
 #!/usr/bin/env python3
 # Copyright (c) 2010 ArtForz -- public domain half-a-node
 # Copyright (c) 2012 Jeff Garzik
 # Copyright (c) 2010-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Bitcoin test framework primitive and message structures
 
 CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
     data structures that should map to corresponding structures in
     bitcoin/primitives
 
 msg_block, msg_tx, msg_headers, etc.:
     data structures that represent network messages
 
 ser_*, deser_*: functions that handle serialization/deserialization.
 
 Classes use __slots__ to ensure extraneous attributes aren't accidentally added
 by tests, compromising their intended effect.
 """
-from codecs import encode
 import copy
 import hashlib
-from io import BytesIO
 import random
 import socket
 import struct
 import time
 import unittest
-
+from codecs import encode
+from io import BytesIO
 from typing import List, Optional
 
 from test_framework.siphash import siphash256
-from test_framework.util import hex_str_to_bytes, assert_equal
-
+from test_framework.util import assert_equal, hex_str_to_bytes
 
 MIN_VERSION_SUPPORTED = 60001
 # past bip-31 for ping/pong
 MY_VERSION = 70014
 MY_SUBVERSION = b"/python-p2p-tester:0.0.3/"
 # from version 70001 onwards, fRelay should be appended to version
 # messages (BIP37)
 MY_RELAY = 1
 
 MAX_LOCATOR_SZ = 101
 MAX_BLOCK_BASE_SIZE = 1000000
 MAX_BLOOM_FILTER_SIZE = 36000
 MAX_BLOOM_HASH_FUNCS = 50
 
 # 1,000,000 XEC in satoshis (legacy BCHA)
 COIN = 100000000
 # 1 XEC in satoshis
 XEC = 100
 MAX_MONEY = 21000000 * COIN
 
 # Maximum length of incoming protocol messages
 MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024
 MAX_HEADERS_RESULTS = 2000  # Number of headers sent in one getheaders result
 MAX_INV_SIZE = 50000  # Maximum number of entries in an 'inv' protocol message
 
 NODE_NETWORK = (1 << 0)
 NODE_GETUTXO = (1 << 1)
 NODE_BLOOM = (1 << 2)
 # NODE_WITNESS = (1 << 3)
 # NODE_XTHIN = (1 << 4) # removed in v0.22.12
 NODE_COMPACT_FILTERS = (1 << 6)
 NODE_NETWORK_LIMITED = (1 << 10)
 NODE_AVALANCHE = (1 << 24)
 
 MSG_TX = 1
 MSG_BLOCK = 2
 MSG_FILTERED_BLOCK = 3
 MSG_CMPCT_BLOCK = 4
 MSG_AVA_PROOF = 0x1f000001
 MSG_TYPE_MASK = 0xffffffff >> 2
 
 FILTER_TYPE_BASIC = 0
 
 # Serialization/deserialization tools
 
 
 def sha256(s):
     return hashlib.new('sha256', s).digest()
 
 
 def hash256(s):
     return sha256(sha256(s))
 
 
 def ser_compact_size(size):
     r = b""
     if size < 253:
         r = struct.pack("B", size)
     elif size < 0x10000:
         r = struct.pack("<BH", 253, size)
     elif size < 0x100000000:
         r = struct.pack("<BI", 254, size)
     else:
         r = struct.pack("<BQ", 255, size)
     return r
 
 
 def deser_compact_size(f):
     nit = struct.unpack("<B", f.read(1))[0]
     if nit == 253:
         nit = struct.unpack("<H", f.read(2))[0]
     elif nit == 254:
         nit = struct.unpack("<I", f.read(4))[0]
     elif nit == 255:
         nit = struct.unpack("<Q", f.read(8))[0]
     return nit
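

 # Editor's illustrative sketch (not in the original patch): the
 # _example_compact_size_roundtrip helper is hypothetical; it exercises
 # the 1/3/5/9-byte CompactSize thresholds implemented above.
 def _example_compact_size_roundtrip():
     for size in (0, 252, 253, 0xffff, 0x10000, 0xffffffff, 0x100000000):
         encoded = ser_compact_size(size)
         assert deser_compact_size(BytesIO(encoded)) == size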
 
 
 def deser_string(f):
     nit = deser_compact_size(f)
     return f.read(nit)
 
 
 def ser_string(s):
     return ser_compact_size(len(s)) + s
 
 
 def deser_uint256(f):
     r = 0
     for i in range(8):
         t = struct.unpack("<I", f.read(4))[0]
         r += t << (i * 32)
     return r
 
 
 def ser_uint256(u):
     rs = b""
     for _ in range(8):
         rs += struct.pack("<I", u & 0xFFFFFFFF)
         u >>= 32
     return rs
 
 
 def uint256_from_str(s):
     r = 0
     t = struct.unpack("<IIIIIIII", s[:32])
     for i in range(8):
         r += t[i] << (i * 32)
     return r
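

 # Editor's illustrative sketch (not in the original patch): the helper
 # below is hypothetical and shows that uint256 values are serialized as
 # eight little-endian 32-bit words, so ser/deser round-trip exactly.
 def _example_uint256_roundtrip():
     value = 0xdeadbeef << 200
     assert uint256_from_str(ser_uint256(value)) == value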
 
 
 def uint256_from_compact(c):
     nbytes = (c >> 24) & 0xFF
     v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
     return v
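

 # Editor's illustrative sketch (not in the original patch): a worked
 # example of the compact encoding above, using the regtest nBits value
 # from blocktools.create_block.
 def _example_compact_target():
     # 0x207fffff: 0x20 = 32 significant bytes, mantissa 0x7fffff, i.e.
     # nearly the largest possible target (easiest difficulty).
     assert uint256_from_compact(0x207fffff) == 0x7fffff << (8 * (0x20 - 3))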
 
 
 # deser_function_name: Allow for an alternate deserialization function on the
 # entries in the vector.
 def deser_vector(f, c, deser_function_name=None):
     nit = deser_compact_size(f)
     r = []
     for _ in range(nit):
         t = c()
         if deser_function_name:
             getattr(t, deser_function_name)(f)
         else:
             t.deserialize(f)
         r.append(t)
     return r
 
 
 # ser_function_name: Allow for an alternate serialization function on the
 # entries in the vector.
 def ser_vector(v, ser_function_name=None):
     r = ser_compact_size(len(v))
     for i in v:
         if ser_function_name:
             r += getattr(i, ser_function_name)()
         else:
             r += i.serialize()
     return r
 
 
 def deser_uint256_vector(f):
     nit = deser_compact_size(f)
     r = []
     for _ in range(nit):
         t = deser_uint256(f)
         r.append(t)
     return r
 
 
 def ser_uint256_vector(v):
     r = ser_compact_size(len(v))
     for i in v:
         r += ser_uint256(i)
     return r
 
 
 def deser_string_vector(f):
     nit = deser_compact_size(f)
     r = []
     for _ in range(nit):
         t = deser_string(f)
         r.append(t)
     return r
 
 
 def ser_string_vector(v):
     r = ser_compact_size(len(v))
     for sv in v:
         r += ser_string(sv)
     return r
 
 
 def FromHex(obj, hex_string):
     """Deserialize from a hex string representation (eg from RPC)"""
     obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
     return obj
 
 
 def ToHex(obj):
     """Convert a binary-serializable object to hex
     (eg for submission via RPC)"""
     return obj.serialize().hex()
 
 
 # Objects that map to bitcoind objects, which can be serialized/deserialized
 
 class CAddress:
     __slots__ = ("net", "ip", "nServices", "port", "time")
 
     # see https://github.com/bitcoin/bips/blob/master/bip-0155.mediawiki
     NET_IPV4 = 1
 
     ADDRV2_NET_NAME = {
         NET_IPV4: "IPv4"
     }
 
     ADDRV2_ADDRESS_LENGTH = {
         NET_IPV4: 4
     }
 
     def __init__(self):
         self.time = 0
         self.nServices = 1
         self.net = self.NET_IPV4
         self.ip = "0.0.0.0"
         self.port = 0
 
     def deserialize(self, f, *, with_time=True):
         """Deserialize from addrv1 format (pre-BIP155)"""
         if with_time:
             # VERSION messages serialize CAddress objects without time
             self.time = struct.unpack("<I", f.read(4))[0]
         self.nServices = struct.unpack("<Q", f.read(8))[0]
         # We only support IPv4 which means skip 12 bytes and read the next 4 as
         # IPv4 address.
         f.read(12)
         self.net = self.NET_IPV4
         self.ip = socket.inet_ntoa(f.read(4))
         self.port = struct.unpack(">H", f.read(2))[0]
 
     def serialize(self, *, with_time=True):
         """Serialize in addrv1 format (pre-BIP155)"""
         assert self.net == self.NET_IPV4
         r = b""
         if with_time:
             # VERSION messages serialize CAddress objects without time
             r += struct.pack("<I", self.time)
         r += struct.pack("<Q", self.nServices)
         r += b"\x00" * 10 + b"\xff" * 2
         r += socket.inet_aton(self.ip)
         r += struct.pack(">H", self.port)
         return r
 
     def deserialize_v2(self, f):
         """Deserialize from addrv2 format (BIP155)"""
         self.time = struct.unpack("<I", f.read(4))[0]
 
         self.nServices = deser_compact_size(f)
 
         self.net = struct.unpack("B", f.read(1))[0]
         assert self.net == self.NET_IPV4
 
         address_length = deser_compact_size(f)
         assert address_length == self.ADDRV2_ADDRESS_LENGTH[self.net]
 
         self.ip = socket.inet_ntoa(f.read(4))
 
         self.port = struct.unpack(">H", f.read(2))[0]
 
     def serialize_v2(self):
         """Serialize in addrv2 format (BIP155)"""
         assert self.net == self.NET_IPV4
         r = b""
         r += struct.pack("<I", self.time)
         r += ser_compact_size(self.nServices)
         r += struct.pack("B", self.net)
         r += ser_compact_size(self.ADDRV2_ADDRESS_LENGTH[self.net])
         r += socket.inet_aton(self.ip)
         r += struct.pack(">H", self.port)
         return r
 
     def __repr__(self):
         return ("CAddress(nServices=%i net=%s addr=%s port=%i)"
                 % (self.nServices, self.ADDRV2_NET_NAME[self.net], self.ip, self.port))
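

 # Editor's illustrative sketch (not in the original patch): the
 # _example_addrv1_size helper is hypothetical. In addrv1 the 4-byte
 # IPv4 address sits in a 16-byte field behind the IPv4-mapped IPv6
 # prefix (10 zero bytes, then 0xff 0xff), giving the familiar sizes.
 def _example_addrv1_size():
     addr = CAddress()
     assert len(addr.serialize(with_time=False)) == 26  # VERSION payload
     assert len(addr.serialize()) == 30                 # with 4-byte time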
 
 
 class CInv:
     __slots__ = ("hash", "type")
 
     typemap = {
         0: "Error",
         MSG_TX: "TX",
         MSG_BLOCK: "Block",
         MSG_FILTERED_BLOCK: "filtered Block",
         MSG_CMPCT_BLOCK: "CompactBlock",
         MSG_AVA_PROOF: "avalanche proof",
     }
 
     def __init__(self, t=0, h=0):
         self.type = t
         self.hash = h
 
     def deserialize(self, f):
         self.type = struct.unpack("<i", f.read(4))[0]
         self.hash = deser_uint256(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<i", self.type)
         r += ser_uint256(self.hash)
         return r
 
     def __repr__(self):
         return "CInv(type={} hash={:064x})".format(
             self.typemap[self.type], self.hash)
 
     def __eq__(self, other):
         return isinstance(
             other, CInv) and self.hash == other.hash and self.type == other.type
 
 
 class CBlockLocator:
     __slots__ = ("nVersion", "vHave")
 
     def __init__(self):
         self.nVersion = MY_VERSION
         self.vHave = []
 
     def deserialize(self, f):
         self.nVersion = struct.unpack("<i", f.read(4))[0]
         self.vHave = deser_uint256_vector(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<i", self.nVersion)
         r += ser_uint256_vector(self.vHave)
         return r
 
     def __repr__(self):
         return "CBlockLocator(nVersion={} vHave={})".format(
             self.nVersion, repr(self.vHave))
 
 
 class COutPoint:
     __slots__ = ("hash", "n")
 
     def __init__(self, hash=0, n=0):
         self.hash = hash
         self.n = n
 
     def deserialize(self, f):
         self.hash = deser_uint256(f)
         self.n = struct.unpack("<I", f.read(4))[0]
 
     def serialize(self):
         r = b""
         r += ser_uint256(self.hash)
         r += struct.pack("<I", self.n)
         return r
 
     def __repr__(self):
         return "COutPoint(hash={:064x} n={})".format(self.hash, self.n)
 
 
 class CTxIn:
     __slots__ = ("nSequence", "prevout", "scriptSig")
 
     def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
         if outpoint is None:
             self.prevout = COutPoint()
         else:
             self.prevout = outpoint
         self.scriptSig = scriptSig
         self.nSequence = nSequence
 
     def deserialize(self, f):
         self.prevout = COutPoint()
         self.prevout.deserialize(f)
         self.scriptSig = deser_string(f)
         self.nSequence = struct.unpack("<I", f.read(4))[0]
 
     def serialize(self):
         r = b""
         r += self.prevout.serialize()
         r += ser_string(self.scriptSig)
         r += struct.pack("<I", self.nSequence)
         return r
 
     def __repr__(self):
         return "CTxIn(prevout={} scriptSig={} nSequence={})".format(
             repr(self.prevout), self.scriptSig.hex(), self.nSequence)
 
 
 class CTxOut:
     __slots__ = ("nValue", "scriptPubKey")
 
     def __init__(self, nValue=0, scriptPubKey=b""):
         self.nValue = nValue
         self.scriptPubKey = scriptPubKey
 
     def deserialize(self, f):
         self.nValue = struct.unpack("<q", f.read(8))[0]
         self.scriptPubKey = deser_string(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<q", self.nValue)
         r += ser_string(self.scriptPubKey)
         return r
 
     def __repr__(self):
         return "CTxOut(nValue={}.{:02d} scriptPubKey={})".format(
             self.nValue // XEC, self.nValue % XEC, self.scriptPubKey.hex())
 
 
 class CTransaction:
     __slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout")
 
     def __init__(self, tx=None):
         if tx is None:
             self.nVersion = 1
             self.vin = []
             self.vout = []
             self.nLockTime = 0
             self.sha256 = None
             self.hash = None
         else:
             self.nVersion = tx.nVersion
             self.vin = copy.deepcopy(tx.vin)
             self.vout = copy.deepcopy(tx.vout)
             self.nLockTime = tx.nLockTime
             self.sha256 = tx.sha256
             self.hash = tx.hash
 
     def deserialize(self, f):
         self.nVersion = struct.unpack("<i", f.read(4))[0]
         self.vin = deser_vector(f, CTxIn)
         self.vout = deser_vector(f, CTxOut)
         self.nLockTime = struct.unpack("<I", f.read(4))[0]
         self.sha256 = None
         self.hash = None
 
     def billable_size(self):
         """
         Returns the size used for billing against the transaction.
         """
         return len(self.serialize())
 
     def serialize(self):
         r = b""
         r += struct.pack("<i", self.nVersion)
         r += ser_vector(self.vin)
         r += ser_vector(self.vout)
         r += struct.pack("<I", self.nLockTime)
         return r
 
     # Recalculate the txid
     def rehash(self):
         self.sha256 = None
         self.calc_sha256()
         return self.hash
 
     # Compute self.sha256 and self.hash; both represent the txid.
     def calc_sha256(self):
         if self.sha256 is None:
             self.sha256 = uint256_from_str(hash256(self.serialize()))
         self.hash = encode(
             hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
 
     def get_id(self):
         # For now, just forward the hash.
         self.calc_sha256()
         return self.hash
 
     def is_valid(self):
         self.calc_sha256()
         for tout in self.vout:
             if tout.nValue < 0 or tout.nValue > MAX_MONEY:
                 return False
         return True
 
     def __repr__(self):
         return "CTransaction(nVersion={} vin={} vout={} nLockTime={})".format(
             self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
 
 
 class CBlockHeader:
     __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
                  "nTime", "nVersion", "sha256")
 
     def __init__(self, header=None):
         if header is None:
             self.set_null()
         else:
             self.nVersion = header.nVersion
             self.hashPrevBlock = header.hashPrevBlock
             self.hashMerkleRoot = header.hashMerkleRoot
             self.nTime = header.nTime
             self.nBits = header.nBits
             self.nNonce = header.nNonce
             self.sha256 = header.sha256
             self.hash = header.hash
             self.calc_sha256()
 
     def set_null(self):
         self.nVersion = 1
         self.hashPrevBlock = 0
         self.hashMerkleRoot = 0
         self.nTime = 0
         self.nBits = 0
         self.nNonce = 0
         self.sha256 = None
         self.hash = None
 
     def deserialize(self, f):
         self.nVersion = struct.unpack("<i", f.read(4))[0]
         self.hashPrevBlock = deser_uint256(f)
         self.hashMerkleRoot = deser_uint256(f)
         self.nTime = struct.unpack("<I", f.read(4))[0]
         self.nBits = struct.unpack("<I", f.read(4))[0]
         self.nNonce = struct.unpack("<I", f.read(4))[0]
         self.sha256 = None
         self.hash = None
 
     def serialize(self):
         r = b""
         r += struct.pack("<i", self.nVersion)
         r += ser_uint256(self.hashPrevBlock)
         r += ser_uint256(self.hashMerkleRoot)
         r += struct.pack("<I", self.nTime)
         r += struct.pack("<I", self.nBits)
         r += struct.pack("<I", self.nNonce)
         return r
 
     def calc_sha256(self):
         if self.sha256 is None:
             r = b""
             r += struct.pack("<i", self.nVersion)
             r += ser_uint256(self.hashPrevBlock)
             r += ser_uint256(self.hashMerkleRoot)
             r += struct.pack("<I", self.nTime)
             r += struct.pack("<I", self.nBits)
             r += struct.pack("<I", self.nNonce)
             self.sha256 = uint256_from_str(hash256(r))
             self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
 
     def rehash(self):
         self.sha256 = None
         self.calc_sha256()
         return self.sha256
 
     def __repr__(self):
         return "CBlockHeader(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x})".format(
             self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
             self.nTime, self.nBits, self.nNonce)
 
 
 BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
 assert_equal(BLOCK_HEADER_SIZE, 80)
 
 
 class CBlock(CBlockHeader):
     __slots__ = ("vtx",)
 
     def __init__(self, header=None):
         super().__init__(header)
         self.vtx = []
 
     def deserialize(self, f):
         super().deserialize(f)
         self.vtx = deser_vector(f, CTransaction)
 
     def serialize(self):
         r = b""
         r += super().serialize()
         r += ser_vector(self.vtx)
         return r
 
     # Calculate the merkle root given a vector of transaction hashes
     def get_merkle_root(self, hashes):
         while len(hashes) > 1:
             newhashes = []
             for i in range(0, len(hashes), 2):
                 i2 = min(i + 1, len(hashes) - 1)
                 newhashes.append(hash256(hashes[i] + hashes[i2]))
             hashes = newhashes
         return uint256_from_str(hashes[0])
 
     def calc_merkle_root(self):
         hashes = []
         for tx in self.vtx:
             tx.calc_sha256()
             hashes.append(ser_uint256(tx.sha256))
         return self.get_merkle_root(hashes)
 
     def is_valid(self):
         self.calc_sha256()
         target = uint256_from_compact(self.nBits)
         if self.sha256 > target:
             return False
         for tx in self.vtx:
             if not tx.is_valid():
                 return False
         if self.calc_merkle_root() != self.hashMerkleRoot:
             return False
         return True
 
     def solve(self):
         self.rehash()
         target = uint256_from_compact(self.nBits)
         while self.sha256 > target:
             self.nNonce += 1
             self.rehash()
 
     def __repr__(self):
         return "CBlock(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x} vtx={})".format(
             self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
             self.nTime, self.nBits, self.nNonce, repr(self.vtx))
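

 # Editor's illustrative sketch (not in the original patch): the
 # _example_merkle_odd_leaves helper is hypothetical; it shows that with
 # an odd number of hashes, get_merkle_root pairs the last entry with
 # itself (i2 clamps to the final index).
 def _example_merkle_odd_leaves():
     a, b, c = (bytes([i]) * 32 for i in range(3))
     root = CBlock().get_merkle_root([a, b, c])
     expected = uint256_from_str(hash256(hash256(a + b) + hash256(c + c)))
     assert root == expected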
 
 
 class PrefilledTransaction:
     __slots__ = ("index", "tx")
 
     def __init__(self, index=0, tx=None):
         self.index = index
         self.tx = tx
 
     def deserialize(self, f):
         self.index = deser_compact_size(f)
         self.tx = CTransaction()
         self.tx.deserialize(f)
 
     def serialize(self):
         r = b""
         r += ser_compact_size(self.index)
         r += self.tx.serialize()
         return r
 
     def __repr__(self):
         return "PrefilledTransaction(index={}, tx={})".format(
             self.index, repr(self.tx))
 
 
 # This is what we send on the wire, in a cmpctblock message.
 class P2PHeaderAndShortIDs:
     __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
                  "shortids", "shortids_length")
 
     def __init__(self):
         self.header = CBlockHeader()
         self.nonce = 0
         self.shortids_length = 0
         self.shortids = []
         self.prefilled_txn_length = 0
         self.prefilled_txn = []
 
     def deserialize(self, f):
         self.header.deserialize(f)
         self.nonce = struct.unpack("<Q", f.read(8))[0]
         self.shortids_length = deser_compact_size(f)
         for _ in range(self.shortids_length):
             # shortids are defined to be 6 bytes in the spec, so append
             # two zero bytes and read it in as an 8-byte number
             self.shortids.append(
                 struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
         self.prefilled_txn = deser_vector(f, PrefilledTransaction)
         self.prefilled_txn_length = len(self.prefilled_txn)
 
     def serialize(self):
         r = b""
         r += self.header.serialize()
         r += struct.pack("<Q", self.nonce)
         r += ser_compact_size(self.shortids_length)
         for x in self.shortids:
             # We only want the first 6 bytes
             r += struct.pack("<Q", x)[0:6]
         r += ser_vector(self.prefilled_txn)
         return r
 
     def __repr__(self):
         return "P2PHeaderAndShortIDs(header={}, nonce={}, shortids_length={}, shortids={}, prefilled_txn_length={}, prefilledtxn={}".format(
             repr(self.header), self.nonce, self.shortids_length,
             repr(self.shortids), self.prefilled_txn_length,
             repr(self.prefilled_txn))
 
 
 def calculate_shortid(k0, k1, tx_hash):
     """Calculate the BIP 152-compact blocks shortid for a given
     transaction hash"""
     expected_shortid = siphash256(k0, k1, tx_hash)
     expected_shortid &= 0x0000ffffffffffff
     return expected_shortid
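

 # Editor's illustrative sketch (not in the original patch): shortids fit
 # in 48 bits by construction, which is why the (de)serialization code in
 # P2PHeaderAndShortIDs reads and writes exactly 6 bytes per entry.
 def _example_shortid_width():
     # tx_hash is a txid as an integer, as stored in CTransaction.sha256.
     assert calculate_shortid(0, 0, 0x1234) < (1 << 48)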
 
 
 # This version gets rid of the array lengths, and reinterprets the differential
 # encoding into indices that can be used for lookup.
 class HeaderAndShortIDs:
     __slots__ = ("header", "nonce", "prefilled_txn", "shortids")
 
     def __init__(self, p2pheaders_and_shortids=None):
         self.header = CBlockHeader()
         self.nonce = 0
         self.shortids = []
         self.prefilled_txn = []
 
         if p2pheaders_and_shortids is not None:
             self.header = p2pheaders_and_shortids.header
             self.nonce = p2pheaders_and_shortids.nonce
             self.shortids = p2pheaders_and_shortids.shortids
             last_index = -1
             for x in p2pheaders_and_shortids.prefilled_txn:
                 self.prefilled_txn.append(
                     PrefilledTransaction(x.index + last_index + 1, x.tx))
                 last_index = self.prefilled_txn[-1].index
 
     def to_p2p(self):
         ret = P2PHeaderAndShortIDs()
         ret.header = self.header
         ret.nonce = self.nonce
         ret.shortids_length = len(self.shortids)
         ret.shortids = self.shortids
         ret.prefilled_txn_length = len(self.prefilled_txn)
         ret.prefilled_txn = []
         last_index = -1
         for x in self.prefilled_txn:
             ret.prefilled_txn.append(
                 PrefilledTransaction(x.index - last_index - 1, x.tx))
             last_index = x.index
         return ret
 
     def get_siphash_keys(self):
         header_nonce = self.header.serialize()
         header_nonce += struct.pack("<Q", self.nonce)
         hash_header_nonce_as_str = sha256(header_nonce)
         key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
         key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
         return [key0, key1]
 
     # Version 2 compact blocks use wtxid in shortids (rather than txid)
     def initialize_from_block(self, block, nonce=0, prefill_list=None):
         if prefill_list is None:
             prefill_list = [0]
         self.header = CBlockHeader(block)
         self.nonce = nonce
         self.prefilled_txn = [PrefilledTransaction(i, block.vtx[i])
                               for i in prefill_list]
         self.shortids = []
         [k0, k1] = self.get_siphash_keys()
         for i in range(len(block.vtx)):
             if i not in prefill_list:
                 tx_hash = block.vtx[i].sha256
                 self.shortids.append(calculate_shortid(k0, k1, tx_hash))
 
     def __repr__(self):
         return "HeaderAndShortIDs(header={}, nonce={}, shortids={}, prefilledtxn={}".format(
             repr(self.header), self.nonce, repr(self.shortids),
             repr(self.prefilled_txn))
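

 # Editor's illustrative sketch (not in the original patch): the
 # _example_siphash_keys helper is hypothetical; the two SipHash keys
 # are simply the first 16 bytes of sha256(header || nonce), read as
 # little-endian uint64s, so sender and receiver derive the same keys.
 def _example_siphash_keys():
     short_ids = HeaderAndShortIDs()
     k0, k1 = short_ids.get_siphash_keys()
     digest = sha256(short_ids.header.serialize()
                     + struct.pack("<Q", short_ids.nonce))
     assert (k0, k1) == struct.unpack("<QQ", digest[:16])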
 
 
 class BlockTransactionsRequest:
     __slots__ = ("blockhash", "indexes")
 
     def __init__(self, blockhash=0, indexes=None):
         self.blockhash = blockhash
         self.indexes = indexes if indexes is not None else []
 
     def deserialize(self, f):
         self.blockhash = deser_uint256(f)
         indexes_length = deser_compact_size(f)
         for _ in range(indexes_length):
             self.indexes.append(deser_compact_size(f))
 
     def serialize(self):
         r = b""
         r += ser_uint256(self.blockhash)
         r += ser_compact_size(len(self.indexes))
         for x in self.indexes:
             r += ser_compact_size(x)
         return r
 
     # helper to set the differentially encoded indexes from absolute ones
     def from_absolute(self, absolute_indexes):
         self.indexes = []
         last_index = -1
         for x in absolute_indexes:
             self.indexes.append(x - last_index - 1)
             last_index = x
 
     def to_absolute(self):
         absolute_indexes = []
         last_index = -1
         for x in self.indexes:
             absolute_indexes.append(x + last_index + 1)
             last_index = absolute_indexes[-1]
         return absolute_indexes
 
     def __repr__(self):
         return "BlockTransactionsRequest(hash={:064x} indexes={})".format(
             self.blockhash, repr(self.indexes))
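

 # Editor's illustrative sketch (not in the original patch): the
 # _example_differential_indexes helper is hypothetical; getblocktxn
 # indexes are differentially encoded, each entry storing the gap minus
 # one from the previous absolute index.
 def _example_differential_indexes():
     request = BlockTransactionsRequest(blockhash=0)
     request.from_absolute([1, 2, 5])
     assert request.indexes == [1, 0, 2]
     assert request.to_absolute() == [1, 2, 5]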
 
 
 class BlockTransactions:
     __slots__ = ("blockhash", "transactions")
 
     def __init__(self, blockhash=0, transactions=None):
         self.blockhash = blockhash
         self.transactions = transactions if transactions is not None else []
 
     def deserialize(self, f):
         self.blockhash = deser_uint256(f)
         self.transactions = deser_vector(f, CTransaction)
 
     def serialize(self):
         r = b""
         r += ser_uint256(self.blockhash)
         r += ser_vector(self.transactions)
         return r
 
     def __repr__(self):
         return "BlockTransactions(hash={:064x} transactions={})".format(
             self.blockhash, repr(self.transactions))
 
 
 class AvalancheStake:
     def __init__(self, utxo=None, amount=0, height=0,
                  pubkey=b"", is_coinbase=False):
         self.utxo: COutPoint = utxo or COutPoint()
         self.amount: int = amount
         """Amount in satoshis (int64)"""
         self.height: int = height
         """Block height containing this utxo (uint32)"""
         self.pubkey: bytes = pubkey
         """Public key"""
 
         self.is_coinbase: bool = is_coinbase
 
     def deserialize(self, f):
         self.utxo = COutPoint()
         self.utxo.deserialize(f)
         self.amount = struct.unpack("<q", f.read(8))[0]
         height_ser = struct.unpack("<I", f.read(4))[0]
         self.is_coinbase = bool(height_ser & 1)
         self.height = height_ser >> 1
         self.pubkey = deser_string(f)
 
     def serialize(self) -> bytes:
         r = self.utxo.serialize()
         height_ser = self.height << 1 | int(self.is_coinbase)
         r += struct.pack('<q', self.amount)
         r += struct.pack('<I', height_ser)
         r += ser_compact_size(len(self.pubkey))
         r += self.pubkey
         return r
 
     def get_hash(self, proofid) -> bytes:
         """Return the bitcoin hash of the concatenation of proofid
         and the serialized stake."""
         return hash256(proofid + self.serialize())
 
     def __repr__(self):
         return f"AvalancheStake(utxo={self.utxo}, amount={self.amount}," \
                f" height={self.height}, " \
                f"pubkey={self.pubkey.hex()})"
 
 
 class AvalancheSignedStake:
     def __init__(self, stake=None, sig=b""):
         self.stake: AvalancheStake = stake or AvalancheStake()
         self.sig: bytes = sig
         """Signature for this stake, bytes of length 64"""
 
     def deserialize(self, f):
         self.stake = AvalancheStake()
         self.stake.deserialize(f)
         self.sig = f.read(64)
 
     def serialize(self) -> bytes:
         return self.stake.serialize() + self.sig
 
 
 class AvalancheProof:
     __slots__ = (
         "sequence",
         "expiration",
         "master",
         "stakes",
         "limited_proofid",
         "proofid")
 
     def __init__(self, sequence=0, expiration=0,
                  master=b"", signed_stakes=None):
         self.sequence: int = sequence
         self.expiration: int = expiration
         self.master: bytes = master
 
         self.stakes: List[AvalancheSignedStake] = signed_stakes or [
             AvalancheSignedStake()]
 
         self.limited_proofid: Optional[int] = None
         self.proofid: Optional[int] = None
         self.compute_proof_id()
 
     def compute_proof_id(self):
         """Compute Bitcoin's 256-bit hash (double SHA-256) of the
         serialized proof data.
         """
         ss = struct.pack("<Qq", self.sequence, self.expiration)
         ss += ser_compact_size(len(self.stakes))
         # Use unsigned stakes
         for s in self.stakes:
             ss += s.stake.serialize()
         h = hash256(ss)
         self.limited_proofid = uint256_from_str(h)
         h += ser_string(self.master)
         h = hash256(h)
         # make it an int, for comparing with Delegation.proofid
         self.proofid = uint256_from_str(h)
 
     def deserialize(self, f):
         self.sequence = struct.unpack("<Q", f.read(8))[0]
         self.expiration = struct.unpack("<q", f.read(8))[0]
         self.master = deser_string(f)
         self.stakes = deser_vector(f, AvalancheSignedStake)
         self.compute_proof_id()
 
     def serialize(self):
         r = b""
         r += struct.pack("<Q", self.sequence)
         r += struct.pack("<q", self.expiration)
         r += ser_string(self.master)
         r += ser_vector(self.stakes)
         return r
 
     def __repr__(self):
         return f"AvalancheProof(sequence={self.sequence}, " \
                f"expiration={self.expiration}, " \
                f"master={self.master.hex()}, " \
                f"stakes={self.stakes})"
 
 
 class AvalanchePoll():
     __slots__ = ("round", "invs")
 
     def __init__(self, round=0, invs=None):
         self.round = round
         self.invs = invs if invs is not None else []
 
     def deserialize(self, f):
         self.round = struct.unpack("<q", f.read(8))[0]
         self.invs = deser_vector(f, CInv)
 
     def serialize(self):
         r = b""
         r += struct.pack("<q", self.round)
         r += ser_vector(self.invs)
         return r
 
     def __repr__(self):
         return "AvalanchePoll(round={}, invs={})".format(
             self.round, repr(self.invs))
 
 
 class AvalancheVote():
     __slots__ = ("error", "hash")
 
     def __init__(self, e=0, h=0):
         self.error = e
         self.hash = h
 
     def deserialize(self, f):
         self.error = struct.unpack("<i", f.read(4))[0]
         self.hash = deser_uint256(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<i", self.error)
         r += ser_uint256(self.hash)
         return r
 
     def __repr__(self):
         return "AvalancheVote(error={}, hash={:064x})".format(
             self.error, self.hash)
 
 
 class AvalancheResponse():
     __slots__ = ("round", "cooldown", "votes")
 
     def __init__(self, round=0, cooldown=0, votes=None):
         self.round = round
         self.cooldown = cooldown
         self.votes = votes if votes is not None else []
 
     def deserialize(self, f):
         self.round = struct.unpack("<q", f.read(8))[0]
         self.cooldown = struct.unpack("<i", f.read(4))[0]
         self.votes = deser_vector(f, AvalancheVote)
 
     def serialize(self):
         r = b""
         r += struct.pack("<q", self.round)
         r += struct.pack("<i", self.cooldown)
         r += ser_vector(self.votes)
         return r
 
     def get_hash(self):
         return hash256(self.serialize())
 
     def __repr__(self):
         return "AvalancheResponse(round={}, cooldown={}, votes={})".format(
             self.round, self.cooldown, repr(self.votes))
 
 
 class TCPAvalancheResponse():
     __slots__ = ("response", "sig")
 
     def __init__(self, response=None, sig=b"\0" * 64):
         # Avoid a shared mutable default: build a fresh response per call.
         self.response = (response if response is not None
                          else AvalancheResponse())
         self.sig = sig
 
     def deserialize(self, f):
         self.response.deserialize(f)
         self.sig = f.read(64)
 
     def serialize(self):
         r = b""
         r += self.response.serialize()
         r += self.sig
         return r
 
     def __repr__(self):
         return "TCPAvalancheResponse(response={}, sig={})".format(
             repr(self.response), self.sig)
 
 
 class AvalancheDelegationLevel:
     __slots__ = ("pubkey", "sig")
 
     def __init__(self, pubkey=b"", sig=b"\0" * 64):
         self.pubkey = pubkey
         self.sig = sig
 
     def deserialize(self, f):
         self.pubkey = deser_string(f)
         self.sig = f.read(64)
 
     def serialize(self):
         r = b""
         r += ser_string(self.pubkey)
         r += self.sig
         return r
 
     def __repr__(self):
         return "AvalancheDelegationLevel(pubkey={}, sig={})".format(
             self.pubkey.hex(), self.sig)
 
 
 class AvalancheDelegation:
     __slots__ = ("limited_proofid", "proof_master", "proofid", "levels")
 
     def __init__(self, limited_proofid=0,
                  proof_master=b"", levels=None):
         self.limited_proofid: int = limited_proofid
         self.proof_master: bytes = proof_master
         self.levels: List[AvalancheDelegationLevel] = levels or []
         self.proofid: int = self.compute_proofid()
 
     def compute_proofid(self) -> int:
         return uint256_from_str(hash256(
             ser_uint256(self.limited_proofid) + ser_string(self.proof_master)))
 
     def deserialize(self, f):
         self.limited_proofid = deser_uint256(f)
         self.proof_master = deser_string(f)
         self.levels = deser_vector(f, AvalancheDelegationLevel)
 
         self.proofid = self.compute_proofid()
 
     def serialize(self):
         r = b""
         r += ser_uint256(self.limited_proofid)
         r += ser_string(self.proof_master)
         r += ser_vector(self.levels)
         return r
 
     def __repr__(self):
         return f"AvalancheDelegation(limitedProofId={self.limited_proofid:064x}, " \
                f"proofMaster={self.proof_master.hex()}, proofid={self.proofid:064x}, " \
                f"levels={self.levels})"
 
     def getid(self):
         h = ser_uint256(self.proofid)
         for level in self.levels:
             h = hash256(h + ser_string(level.pubkey))
         return h
 
 
 class AvalancheHello():
     __slots__ = ("delegation", "sig")
 
     def __init__(self, delegation=None, sig=b"\0" * 64):
         # Avoid a shared mutable default: build a fresh delegation per call.
         self.delegation = (delegation if delegation is not None
                            else AvalancheDelegation())
         self.sig = sig
 
     def deserialize(self, f):
         self.delegation.deserialize(f)
         self.sig = f.read(64)
 
     def serialize(self):
         r = b""
         r += self.delegation.serialize()
         r += self.sig
         return r
 
     def __repr__(self):
         return "AvalancheHello(delegation={}, sig={})".format(
             repr(self.delegation), self.sig)
 
     def get_sighash(self, node):
         b = self.delegation.getid()
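         # Field order is the mirror image of the order used by the signing
         # peer (AvaP2PInterface.send_avahello packs its own local values
         # first), so both sides hash identical bytes.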
         b += struct.pack("<Q", node.remote_nonce)
         b += struct.pack("<Q", node.local_nonce)
         b += struct.pack("<Q", node.remote_extra_entropy)
         b += struct.pack("<Q", node.local_extra_entropy)
         return hash256(b)
 
 
 class CPartialMerkleTree:
     __slots__ = ("nTransactions", "vBits", "vHash")
 
     def __init__(self):
         self.nTransactions = 0
         self.vHash = []
         self.vBits = []
 
     def deserialize(self, f):
         self.nTransactions = struct.unpack("<i", f.read(4))[0]
         self.vHash = deser_uint256_vector(f)
         vBytes = deser_string(f)
         self.vBits = []
         for i in range(len(vBytes) * 8):
             self.vBits.append(vBytes[i // 8] & (1 << (i % 8)) != 0)
 
     def serialize(self):
         r = b""
         r += struct.pack("<i", self.nTransactions)
         r += ser_uint256_vector(self.vHash)
         vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7) // 8))
         for i in range(len(self.vBits)):
             vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
         r += ser_string(bytes(vBytesArray))
         return r
 
     def __repr__(self):
         return "CPartialMerkleTree(nTransactions={}, vHash={}, vBits={})".format(
             self.nTransactions, repr(self.vHash), repr(self.vBits))
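

 # Editor's illustrative sketch (not in the original patch): the
 # _example_partial_merkle_bits helper is hypothetical; vBits is packed
 # LSB-first into bytes and zero-padded to a byte boundary, so a
 # round-trip preserves the bits but pads the list to a multiple of 8.
 def _example_partial_merkle_bits():
     tree = CPartialMerkleTree()
     tree.nTransactions = 1
     tree.vBits = [True, False, True]
     decoded = CPartialMerkleTree()
     decoded.deserialize(BytesIO(tree.serialize()))
     assert decoded.vBits[:3] == [True, False, True]
     assert len(decoded.vBits) == 8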
 
 
 class CMerkleBlock:
     __slots__ = ("header", "txn")
 
     def __init__(self):
         self.header = CBlockHeader()
         self.txn = CPartialMerkleTree()
 
     def deserialize(self, f):
         self.header.deserialize(f)
         self.txn.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.header.serialize()
         r += self.txn.serialize()
         return r
 
     def __repr__(self):
         return "CMerkleBlock(header={}, txn={})".format(
             repr(self.header), repr(self.txn))
 
 
 # Objects that correspond to messages on the wire
 
 class msg_version:
     __slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices",
                  "nStartingHeight", "nTime", "nVersion", "strSubVer", "nExtraEntropy")
     msgtype = b"version"
 
     def __init__(self):
         self.nVersion = MY_VERSION
         self.nServices = 1
         self.nTime = int(time.time())
         self.addrTo = CAddress()
         self.addrFrom = CAddress()
         self.nNonce = random.getrandbits(64)
         self.strSubVer = MY_SUBVERSION
         self.nStartingHeight = -1
         self.nRelay = MY_RELAY
         self.nExtraEntropy = random.getrandbits(64)
 
     def deserialize(self, f):
         self.nVersion = struct.unpack("<i", f.read(4))[0]
         self.nServices = struct.unpack("<Q", f.read(8))[0]
         self.nTime = struct.unpack("<q", f.read(8))[0]
         self.addrTo = CAddress()
         self.addrTo.deserialize(f, with_time=False)
 
         self.addrFrom = CAddress()
         self.addrFrom.deserialize(f, with_time=False)
         self.nNonce = struct.unpack("<Q", f.read(8))[0]
         self.strSubVer = deser_string(f)
 
         self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
 
         self.nRelay = struct.unpack("<b", f.read(1))[0]
 
         self.nExtraEntropy = struct.unpack("<Q", f.read(8))[0]
 
     def serialize(self):
         r = b""
         r += struct.pack("<i", self.nVersion)
         r += struct.pack("<Q", self.nServices)
         r += struct.pack("<q", self.nTime)
         r += self.addrTo.serialize(with_time=False)
         r += self.addrFrom.serialize(with_time=False)
         r += struct.pack("<Q", self.nNonce)
         r += ser_string(self.strSubVer)
         r += struct.pack("<i", self.nStartingHeight)
         r += struct.pack("<b", self.nRelay)
         r += struct.pack("<Q", self.nExtraEntropy)
         return r
 
     def __repr__(self):
         return 'msg_version(nVersion={} nServices={} nTime={} addrTo={} addrFrom={} nNonce=0x{:016X} strSubVer={} nStartingHeight={} nRelay={} nExtraEntropy={})'.format(
             self.nVersion, self.nServices, self.nTime,
             repr(self.addrTo), repr(self.addrFrom), self.nNonce,
             self.strSubVer, self.nStartingHeight, self.nRelay, self.nExtraEntropy)
 
 
 class msg_verack:
     __slots__ = ()
     msgtype = b"verack"
 
     def __init__(self):
         pass
 
     def deserialize(self, f):
         pass
 
     def serialize(self):
         return b""
 
     def __repr__(self):
         return "msg_verack()"
 
 
 class msg_addr:
     __slots__ = ("addrs",)
     msgtype = b"addr"
 
     def __init__(self):
         self.addrs = []
 
     def deserialize(self, f):
         self.addrs = deser_vector(f, CAddress)
 
     def serialize(self):
         return ser_vector(self.addrs)
 
     def __repr__(self):
         return "msg_addr(addrs={})".format(repr(self.addrs))
 
 
 class msg_addrv2:
     __slots__ = ("addrs",)
     msgtype = b"addrv2"
 
     def __init__(self):
         self.addrs = []
 
     def deserialize(self, f):
         self.addrs = deser_vector(f, CAddress, "deserialize_v2")
 
     def serialize(self):
         return ser_vector(self.addrs, "serialize_v2")
 
     def __repr__(self):
         return "msg_addrv2(addrs={})".format(repr(self.addrs))
 
 
 class msg_sendaddrv2:
     __slots__ = ()
     msgtype = b"sendaddrv2"
 
     def __init__(self):
         pass
 
     def deserialize(self, f):
         pass
 
     def serialize(self):
         return b""
 
     def __repr__(self):
         return "msg_sendaddrv2()"
 
 
 class msg_inv:
     __slots__ = ("inv",)
     msgtype = b"inv"
 
     def __init__(self, inv=None):
         if inv is None:
             self.inv = []
         else:
             self.inv = inv
 
     def deserialize(self, f):
         self.inv = deser_vector(f, CInv)
 
     def serialize(self):
         return ser_vector(self.inv)
 
     def __repr__(self):
         return "msg_inv(inv={})".format(repr(self.inv))
 
 
 class msg_getdata:
     __slots__ = ("inv",)
     msgtype = b"getdata"
 
     def __init__(self, inv=None):
         self.inv = inv if inv is not None else []
 
     def deserialize(self, f):
         self.inv = deser_vector(f, CInv)
 
     def serialize(self):
         return ser_vector(self.inv)
 
     def __repr__(self):
         return "msg_getdata(inv={})".format(repr(self.inv))
 
 
 class msg_getblocks:
     __slots__ = ("locator", "hashstop")
     msgtype = b"getblocks"
 
     def __init__(self):
         self.locator = CBlockLocator()
         self.hashstop = 0
 
     def deserialize(self, f):
         self.locator = CBlockLocator()
         self.locator.deserialize(f)
         self.hashstop = deser_uint256(f)
 
     def serialize(self):
         r = b""
         r += self.locator.serialize()
         r += ser_uint256(self.hashstop)
         return r
 
     def __repr__(self):
         return "msg_getblocks(locator={} hashstop={:064x})".format(
             repr(self.locator), self.hashstop)
 
 
 class msg_tx:
     __slots__ = ("tx",)
     msgtype = b"tx"
 
     def __init__(self, tx=None):
         # Avoid a mutable default argument: a shared CTransaction() default
         # would be mutated by every deserialize() on a default-constructed
         # msg_tx.
         self.tx = tx if tx is not None else CTransaction()
 
     def deserialize(self, f):
         self.tx.deserialize(f)
 
     def serialize(self):
         return self.tx.serialize()
 
     def __repr__(self):
         return "msg_tx(tx={})".format(repr(self.tx))
 
 
 class msg_block:
     __slots__ = ("block",)
     msgtype = b"block"
 
     def __init__(self, block=None):
         if block is None:
             self.block = CBlock()
         else:
             self.block = block
 
     def deserialize(self, f):
         self.block.deserialize(f)
 
     def serialize(self):
         return self.block.serialize()
 
     def __repr__(self):
         return "msg_block(block={})".format(repr(self.block))
 
 
 # For cases where a user needs tighter control over what is sent over the
 # wire. Note that the user must supply the name of the msgtype and the data.
 class msg_generic:
     __slots__ = ("msgtype", "data")
 
     def __init__(self, msgtype, data=None):
         self.msgtype = msgtype
         self.data = data
 
     def serialize(self):
         return self.data
 
     def __repr__(self):
         return "msg_generic()"
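
 # Illustrative sketch (not part of the framework API): msg_generic lets a
 # test send a raw payload under an arbitrary command name, e.g. to probe a
 # node's reaction to unknown messages. 'peer' is an assumed, already
 # connected P2PConnection:
 #
 #   unknown = msg_generic(b"bogus", data=b"\x00" * 8)
 #   peer.send_message(unknown)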
 
 
 class msg_getaddr:
     __slots__ = ()
     msgtype = b"getaddr"
 
     def __init__(self):
         pass
 
     def deserialize(self, f):
         pass
 
     def serialize(self):
         return b""
 
     def __repr__(self):
         return "msg_getaddr()"
 
 
 class msg_ping:
     __slots__ = ("nonce",)
     msgtype = b"ping"
 
     def __init__(self, nonce=0):
         self.nonce = nonce
 
     def deserialize(self, f):
         self.nonce = struct.unpack("<Q", f.read(8))[0]
 
     def serialize(self):
         r = b""
         r += struct.pack("<Q", self.nonce)
         return r
 
     def __repr__(self):
         return "msg_ping(nonce={:08x})".format(self.nonce)
 
 
 class msg_pong:
     __slots__ = ("nonce",)
     msgtype = b"pong"
 
     def __init__(self, nonce=0):
         self.nonce = nonce
 
     def deserialize(self, f):
         self.nonce = struct.unpack("<Q", f.read(8))[0]
 
     def serialize(self):
         r = b""
         r += struct.pack("<Q", self.nonce)
         return r
 
     def __repr__(self):
         return "msg_pong(nonce={:08x})".format(self.nonce)
 
 
 class msg_mempool:
     __slots__ = ()
     msgtype = b"mempool"
 
     def __init__(self):
         pass
 
     def deserialize(self, f):
         pass
 
     def serialize(self):
         return b""
 
     def __repr__(self):
         return "msg_mempool()"
 
 
 class msg_notfound:
     __slots__ = ("vec", )
     msgtype = b"notfound"
 
     def __init__(self, vec=None):
         self.vec = vec or []
 
     def deserialize(self, f):
         self.vec = deser_vector(f, CInv)
 
     def serialize(self):
         return ser_vector(self.vec)
 
     def __repr__(self):
         return "msg_notfound(vec={})".format(repr(self.vec))
 
 
 class msg_sendheaders:
     __slots__ = ()
     msgtype = b"sendheaders"
 
     def __init__(self):
         pass
 
     def deserialize(self, f):
         pass
 
     def serialize(self):
         return b""
 
     def __repr__(self):
         return "msg_sendheaders()"
 
 
 # getheaders message has
 # number of entries
 # vector of hashes
 # hash_stop (hash of last desired block header, 0 to get as many as possible)
 class msg_getheaders:
     __slots__ = ("hashstop", "locator",)
     msgtype = b"getheaders"
 
     def __init__(self):
         self.locator = CBlockLocator()
         self.hashstop = 0
 
     def deserialize(self, f):
         self.locator = CBlockLocator()
         self.locator.deserialize(f)
         self.hashstop = deser_uint256(f)
 
     def serialize(self):
         r = b""
         r += self.locator.serialize()
         r += ser_uint256(self.hashstop)
         return r
 
     def __repr__(self):
         return "msg_getheaders(locator={}, stop={:064x})".format(
             repr(self.locator), self.hashstop)
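
 # Illustrative sketch: a typical getheaders request leaves hashstop at zero
 # to ask for as many headers as possible after the locator. 'known_hash' is
 # an assumed hex block-hash string and 'peer' an assumed connected
 # P2PInterface:
 #
 #   m = msg_getheaders()
 #   m.locator.vHave = [int(known_hash, 16)]
 #   peer.send_message(m)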
 
 
 # headers message has
 # <count> <vector of block headers>
 class msg_headers:
     __slots__ = ("headers",)
     msgtype = b"headers"
 
     def __init__(self, headers=None):
         self.headers = headers if headers is not None else []
 
     def deserialize(self, f):
         # comment in bitcoind indicates these should be deserialized as blocks
         blocks = deser_vector(f, CBlock)
         for x in blocks:
             self.headers.append(CBlockHeader(x))
 
     def serialize(self):
         blocks = [CBlock(x) for x in self.headers]
         return ser_vector(blocks)
 
     def __repr__(self):
         return "msg_headers(headers={})".format(repr(self.headers))
 
 
 class msg_merkleblock:
     __slots__ = ("merkleblock",)
     msgtype = b"merkleblock"
 
     def __init__(self, merkleblock=None):
         if merkleblock is None:
             self.merkleblock = CMerkleBlock()
         else:
             self.merkleblock = merkleblock
 
     def deserialize(self, f):
         self.merkleblock.deserialize(f)
 
     def serialize(self):
         return self.merkleblock.serialize()
 
     def __repr__(self):
         return "msg_merkleblock(merkleblock={})".format(repr(self.merkleblock))
 
 
 class msg_filterload:
     __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags")
     msgtype = b"filterload"
 
     def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0):
         self.data = data
         self.nHashFuncs = nHashFuncs
         self.nTweak = nTweak
         self.nFlags = nFlags
 
     def deserialize(self, f):
         self.data = deser_string(f)
         self.nHashFuncs = struct.unpack("<I", f.read(4))[0]
         self.nTweak = struct.unpack("<I", f.read(4))[0]
         self.nFlags = struct.unpack("<B", f.read(1))[0]
 
     def serialize(self):
         r = b""
         r += ser_string(self.data)
         r += struct.pack("<I", self.nHashFuncs)
         r += struct.pack("<I", self.nTweak)
         r += struct.pack("<B", self.nFlags)
         return r
 
     def __repr__(self):
         return "msg_filterload(data={}, nHashFuncs={}, nTweak={}, nFlags={})".format(
             self.data, self.nHashFuncs, self.nTweak, self.nFlags)
 
 
 class msg_filteradd:
     __slots__ = ("data",)
     msgtype = b"filteradd"
 
     def __init__(self, data):
         self.data = data
 
     def deserialize(self, f):
         self.data = deser_string(f)
 
     def serialize(self):
         r = b""
         r += ser_string(self.data)
         return r
 
     def __repr__(self):
         return "msg_filteradd(data={})".format(self.data)
 
 
 class msg_filterclear:
     __slots__ = ()
     msgtype = b"filterclear"
 
     def __init__(self):
         pass
 
     def deserialize(self, f):
         pass
 
     def serialize(self):
         return b""
 
     def __repr__(self):
         return "msg_filterclear()"
 
 
 class msg_feefilter:
     __slots__ = ("feerate",)
     msgtype = b"feefilter"
 
     def __init__(self, feerate=0):
         self.feerate = feerate
 
     def deserialize(self, f):
         self.feerate = struct.unpack("<Q", f.read(8))[0]
 
     def serialize(self):
         r = b""
         r += struct.pack("<Q", self.feerate)
         return r
 
     def __repr__(self):
         return "msg_feefilter(feerate={:08x})".format(self.feerate)
 
 
 class msg_sendcmpct:
     __slots__ = ("announce", "version")
     msgtype = b"sendcmpct"
 
     def __init__(self):
         self.announce = False
         self.version = 1
 
     def deserialize(self, f):
         self.announce = struct.unpack("<?", f.read(1))[0]
         self.version = struct.unpack("<Q", f.read(8))[0]
 
     def serialize(self):
         r = b""
         r += struct.pack("<?", self.announce)
         r += struct.pack("<Q", self.version)
         return r
 
     def __repr__(self):
         return "msg_sendcmpct(announce={}, version={})".format(
             self.announce, self.version)
 
 
 class msg_cmpctblock:
     __slots__ = ("header_and_shortids",)
     msgtype = b"cmpctblock"
 
     def __init__(self, header_and_shortids=None):
         self.header_and_shortids = header_and_shortids
 
     def deserialize(self, f):
         self.header_and_shortids = P2PHeaderAndShortIDs()
         self.header_and_shortids.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.header_and_shortids.serialize()
         return r
 
     def __repr__(self):
         return "msg_cmpctblock(HeaderAndShortIDs={})".format(
             repr(self.header_and_shortids))
 
 
 class msg_getblocktxn:
     __slots__ = ("block_txn_request",)
     msgtype = b"getblocktxn"
 
     def __init__(self):
         self.block_txn_request = None
 
     def deserialize(self, f):
         self.block_txn_request = BlockTransactionsRequest()
         self.block_txn_request.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.block_txn_request.serialize()
         return r
 
     def __repr__(self):
         return "msg_getblocktxn(block_txn_request={})".format(
             repr(self.block_txn_request))
 
 
 class msg_blocktxn:
     __slots__ = ("block_transactions",)
     msgtype = b"blocktxn"
 
     def __init__(self):
         self.block_transactions = BlockTransactions()
 
     def deserialize(self, f):
         self.block_transactions.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.block_transactions.serialize()
         return r
 
     def __repr__(self):
         return "msg_blocktxn(block_transactions={})".format(
             repr(self.block_transactions))
 
 
 class msg_getcfilters:
     __slots__ = ("filter_type", "start_height", "stop_hash")
     msgtype = b"getcfilters"
 
     def __init__(self, filter_type, start_height, stop_hash):
         self.filter_type = filter_type
         self.start_height = start_height
         self.stop_hash = stop_hash
 
     def deserialize(self, f):
         self.filter_type = struct.unpack("<B", f.read(1))[0]
         self.start_height = struct.unpack("<I", f.read(4))[0]
         self.stop_hash = deser_uint256(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<B", self.filter_type)
         r += struct.pack("<I", self.start_height)
         r += ser_uint256(self.stop_hash)
         return r
 
     def __repr__(self):
         return "msg_getcfilters(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
             self.filter_type, self.start_height, self.stop_hash)
 
 
 class msg_cfilter:
     __slots__ = ("filter_type", "block_hash", "filter_data")
     msgtype = b"cfilter"
 
     def __init__(self, filter_type=None, block_hash=None, filter_data=None):
         self.filter_type = filter_type
         self.block_hash = block_hash
         self.filter_data = filter_data
 
     def deserialize(self, f):
         self.filter_type = struct.unpack("<B", f.read(1))[0]
         self.block_hash = deser_uint256(f)
         self.filter_data = deser_string(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<B", self.filter_type)
         r += ser_uint256(self.block_hash)
         r += ser_string(self.filter_data)
         return r
 
     def __repr__(self):
         return "msg_cfilter(filter_type={:#x}, block_hash={:x})".format(
             self.filter_type, self.block_hash)
 
 
 class msg_getcfheaders:
     __slots__ = ("filter_type", "start_height", "stop_hash")
     msgtype = b"getcfheaders"
 
     def __init__(self, filter_type, start_height, stop_hash):
         self.filter_type = filter_type
         self.start_height = start_height
         self.stop_hash = stop_hash
 
     def deserialize(self, f):
         self.filter_type = struct.unpack("<B", f.read(1))[0]
         self.start_height = struct.unpack("<I", f.read(4))[0]
         self.stop_hash = deser_uint256(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<B", self.filter_type)
         r += struct.pack("<I", self.start_height)
         r += ser_uint256(self.stop_hash)
         return r
 
     def __repr__(self):
         return "msg_getcfheaders(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
             self.filter_type, self.start_height, self.stop_hash)
 
 
 class msg_cfheaders:
     __slots__ = ("filter_type", "stop_hash", "prev_header", "hashes")
     msgtype = b"cfheaders"
 
     def __init__(self, filter_type=None, stop_hash=None,
                  prev_header=None, hashes=None):
         self.filter_type = filter_type
         self.stop_hash = stop_hash
         self.prev_header = prev_header
         self.hashes = hashes
 
     def deserialize(self, f):
         self.filter_type = struct.unpack("<B", f.read(1))[0]
         self.stop_hash = deser_uint256(f)
         self.prev_header = deser_uint256(f)
         self.hashes = deser_uint256_vector(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<B", self.filter_type)
         r += ser_uint256(self.stop_hash)
         r += ser_uint256(self.prev_header)
         r += ser_uint256_vector(self.hashes)
         return r
 
     def __repr__(self):
         return "msg_cfheaders(filter_type={:#x}, stop_hash={:x})".format(
             self.filter_type, self.stop_hash)
 
 
 class msg_getcfcheckpt:
     __slots__ = ("filter_type", "stop_hash")
     msgtype = b"getcfcheckpt"
 
     def __init__(self, filter_type, stop_hash):
         self.filter_type = filter_type
         self.stop_hash = stop_hash
 
     def deserialize(self, f):
         self.filter_type = struct.unpack("<B", f.read(1))[0]
         self.stop_hash = deser_uint256(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<B", self.filter_type)
         r += ser_uint256(self.stop_hash)
         return r
 
     def __repr__(self):
         return "msg_getcfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
             self.filter_type, self.stop_hash)
 
 
 class msg_cfcheckpt:
     __slots__ = ("filter_type", "stop_hash", "headers")
     msgtype = b"cfcheckpt"
 
     def __init__(self, filter_type=None, stop_hash=None, headers=None):
         self.filter_type = filter_type
         self.stop_hash = stop_hash
         self.headers = headers
 
     def deserialize(self, f):
         self.filter_type = struct.unpack("<B", f.read(1))[0]
         self.stop_hash = deser_uint256(f)
         self.headers = deser_uint256_vector(f)
 
     def serialize(self):
         r = b""
         r += struct.pack("<B", self.filter_type)
         r += ser_uint256(self.stop_hash)
         r += ser_uint256_vector(self.headers)
         return r
 
     def __repr__(self):
         return "msg_cfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
             self.filter_type, self.stop_hash)
 
 
 class msg_avaproof:
     __slots__ = ("proof",)
     msgtype = b"avaproof"
 
     def __init__(self):
         self.proof = AvalancheProof()
 
     def deserialize(self, f):
         self.proof.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.proof.serialize()
         return r
 
     def __repr__(self):
         return "msg_avaproof(proof={})".format(repr(self.proof))
 
 
 class msg_avapoll:
     __slots__ = ("poll",)
     msgtype = b"avapoll"
 
     def __init__(self):
         self.poll = AvalanchePoll()
 
     def deserialize(self, f):
         self.poll.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.poll.serialize()
         return r
 
     def __repr__(self):
         return "msg_avapoll(poll={})".format(repr(self.poll))
 
 
 class msg_avaresponse:
     __slots__ = ("response",)
     msgtype = b"avaresponse"
 
     def __init__(self):
         self.response = AvalancheResponse()
 
     def deserialize(self, f):
         self.response.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.response.serialize()
         return r
 
     def __repr__(self):
         return "msg_avaresponse(response={})".format(repr(self.response))
 
 
 class msg_tcpavaresponse:
     __slots__ = ("response",)
     msgtype = b"avaresponse"
 
     def __init__(self):
         self.response = TCPAvalancheResponse()
 
     def deserialize(self, f):
         self.response.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.response.serialize()
         return r
 
     def __repr__(self):
         return "msg_tcpavaresponse(response={})".format(repr(self.response))
 
 
 class msg_avahello:
     __slots__ = ("hello",)
     msgtype = b"avahello"
 
     def __init__(self):
         self.hello = AvalancheHello()
 
     def deserialize(self, f):
         self.hello.deserialize(f)
 
     def serialize(self):
         r = b""
         r += self.hello.serialize()
         return r
 
     def __repr__(self):
         return "msg_avahello(hello={})".format(repr(self.hello))
 
 
 class TestFrameworkMessages(unittest.TestCase):
     def test_serialization_round_trip(self):
         """Verify that messages and serialized objects are unchanged after
         a round-trip of deserialization-serialization.
         """
 
         proof_hex = (
             "2a00000000000000fff053650000000021030b4c866585dd868a9d62348a9cd00"
             "8d6a312937048fff31670e7e920cfc7a74401b7fc19792583e9cb39843fc5e22a"
             "4e3648ab1cb18a70290b341ee8d4f550ae2400000000102700000000000078881"
             "4004104d0de0aaeaefad02b8bdc8a01a1b8b11c696bd3d66a2c5f10780d95b7df"
             "42645cd85228a6fb29940e858e7e55842ae2bd115d1ed7cc0e82d934e929c9764"
             "8cb0ac3052d58da74de7404e84ebe2940ed2b0fe85578d8230788d8387aeaa618"
             "274b0f2edc73679fd398f60e6315258c9ec348df7fcc09340ae1af37d009719b0"
             "665"
         )
 
         avaproof = FromHex(AvalancheProof(), proof_hex)
         self.assertEqual(ToHex(avaproof), proof_hex)
 
         self.assertEqual(
             f"{avaproof.proofid:0{64}x}",
             "cb33d7fac9092089f0d473c13befa012e6ee4d19abf9a42248f731d5e59e74a2"
         )
         self.assertEqual(avaproof.sequence, 42)
         self.assertEqual(avaproof.expiration, 1699999999)
         # The master key is extracted from the key_tests.cpp.
         # Associated privkey:
         #   hex: 12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747
         #   WIF: cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN
         self.assertEqual(avaproof.master, bytes.fromhex(
             "030b4c866585dd868a9d62348a9cd008d6a312937048fff31670e7e920cfc7a744"
         ))
         self.assertEqual(len(avaproof.stakes), 1)
         self.assertEqual(avaproof.stakes[0].sig, bytes.fromhex(
             "c3052d58da74de7404e84ebe2940ed2b0fe85578d8230788d8387aeaa618274b"
             "0f2edc73679fd398f60e6315258c9ec348df7fcc09340ae1af37d009719b0665"
         ))
         self.assertEqual(f"{avaproof.stakes[0].stake.utxo.hash:x}",
                          "24ae50f5d4e81e340b29708ab11cab48364e2ae2c53f8439cbe983257919fcb7"
                          )
         self.assertEqual(avaproof.stakes[0].stake.utxo.n, 0)
         self.assertEqual(avaproof.stakes[0].stake.amount, 10000)
         self.assertEqual(avaproof.stakes[0].stake.height, 672828)
         self.assertEqual(avaproof.stakes[0].stake.is_coinbase, False)
         self.assertEqual(avaproof.stakes[0].stake.pubkey, bytes.fromhex(
             "04d0de0aaeaefad02b8bdc8a01a1b8b11c696bd3d66a2c5f10780d95b7df42645"
             "cd85228a6fb29940e858e7e55842ae2bd115d1ed7cc0e82d934e929c97648cb0a"
         ))
 
         msg_proof = msg_avaproof()
         msg_proof.proof = avaproof
         self.assertEqual(ToHex(msg_proof), proof_hex)
diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py
index 4da80fef4..2456b4860 100644
--- a/test/functional/test_framework/netutil.py
+++ b/test/functional/test_framework/netutil.py
@@ -1,182 +1,183 @@
 #!/usr/bin/env python3
 # Copyright (c) 2014-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Linux network utilities.
 
 Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
 """
 
 import array
-from binascii import unhexlify
-from errno import ENOENT, EINVAL
 import os
 import socket
 import struct
 import sys
+from binascii import unhexlify
+from errno import EINVAL, ENOENT
 
 # STATE_ESTABLISHED = '01'
 # STATE_SYN_SENT = '02'
 # STATE_SYN_RECV = '03'
 # STATE_FIN_WAIT1 = '04'
 # STATE_FIN_WAIT2 = '05'
 # STATE_TIME_WAIT = '06'
 # STATE_CLOSE = '07'
 # STATE_CLOSE_WAIT = '08'
 # STATE_LAST_ACK = '09'
 STATE_LISTEN = '0A'
 # STATE_CLOSING = '0B'
 
 
 def get_socket_inodes(pid):
     '''
     Get list of socket inodes for process pid.
     '''
     base = '/proc/{}/fd'.format(pid)
     inodes = []
     for item in os.listdir(base):
         try:
             target = os.readlink(os.path.join(base, item))
         except OSError as err:
             if err.errno == ENOENT:
                 # The file vanished in the meantime
                 continue
             elif err.errno == EINVAL:
                 # Not a link
                 continue
             else:
                 raise
         else:
             if target.startswith('socket:'):
                 inodes.append(int(target[8:-1]))
     return inodes
 
 
 def _remove_empty(items):
     return [x for x in items if x != '']
 
 
 def _convert_ip_port(array):
     host, port = array.split(':')
     # convert host from mangled-per-four-bytes form as used by kernel
     host = unhexlify(host)
     host_out = ''
     for x in range(0, len(host) // 4):
         (val,) = struct.unpack('=I', host[x * 4:(x + 1) * 4])
         host_out += '{:08x}'.format(val)
 
     return host_out, int(port, 16)
 
 
 def netstat(typ='tcp'):
     '''
     Return a list with the status of TCP connections on Linux systems.
     To get the pids of all networking processes running on the system, you
     must run this script as superuser.
     '''
     with open('/proc/net/' + typ, 'r', encoding='utf8') as f:
         content = f.readlines()
         content.pop(0)
     result = []
     for line in content:
         # Split lines and remove empty spaces.
         line_array = _remove_empty(line.split(' '))
         tcp_id = line_array[0]
         l_addr = _convert_ip_port(line_array[1])
         r_addr = _convert_ip_port(line_array[2])
         state = line_array[3]
         # Need the inode to match with process pid.
         inode = int(line_array[9])
         nline = [tcp_id, l_addr, r_addr, state, inode]
         result.append(nline)
     return result
 
 
 def get_bind_addrs(pid):
     '''
     Get bind addresses as (host,port) tuples for process pid.
     '''
     inodes = get_socket_inodes(pid)
     bind_addrs = []
     for conn in netstat('tcp') + netstat('tcp6'):
         if conn[3] == STATE_LISTEN and conn[4] in inodes:
             bind_addrs.append(conn[1])
     return bind_addrs
 
 # from: http://code.activestate.com/recipes/439093/
 
 
 def all_interfaces():
     '''
     Return all interfaces that are up
     '''
     # Linux only, so only import when required
     import fcntl
 
     is_64bits = sys.maxsize > 2**32
     struct_size = 40 if is_64bits else 32
     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     max_possible = 8  # initial value
     while True:
         buf_size = max_possible * struct_size
         names = array.array('B', b'\0' * buf_size)
         outbytes = struct.unpack('iL', fcntl.ioctl(
             s.fileno(),
             0x8912,  # SIOCGIFCONF
             struct.pack('iL', buf_size, names.buffer_info()[0])
         ))[0]
         if outbytes == buf_size:
             # The buffer may have been too small to hold all interfaces;
             # double it and try again.
             max_possible *= 2
         else:
             break
     namestr = names.tobytes()
     return [(namestr[i:i + 16].split(b'\0', 1)[0],
              socket.inet_ntoa(namestr[i + 20:i + 24]))
             for i in range(0, outbytes, struct_size)]
 
 
 def addr_to_hex(addr):
     '''
     Convert string IPv4 or IPv6 address to binary address as returned by
     get_bind_addrs.
     Very naive implementation that certainly doesn't work for all IPv6 variants.
     '''
     if '.' in addr:  # IPv4
         addr = [int(x) for x in addr.split('.')]
     elif ':' in addr:  # IPv6
         sub = [[], []]  # prefix, suffix
         x = 0
         addr = addr.split(':')
         for i, comp in enumerate(addr):
             if comp == '':
                 # skip empty component at beginning or end
                 if i == 0 or i == (len(addr) - 1):
                     continue
                 x += 1  # :: skips to suffix
                 assert x < 2
             else:  # two bytes per component
                 val = int(comp, 16)
                 sub[x].append(val >> 8)
                 sub[x].append(val & 0xff)
         nullbytes = 16 - len(sub[0]) - len(sub[1])
         assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)
         addr = sub[0] + ([0] * nullbytes) + sub[1]
     else:
         raise ValueError('Could not parse address {}'.format(addr))
     return bytearray(addr).hex()
 
 
 def test_ipv6_local():
     '''
     Check for (local) IPv6 support.
     '''
+
     # By using SOCK_DGRAM this will not actually make a connection, but it will
     # fail if there is no route to IPv6 localhost.
     have_ipv6 = True
     try:
         s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
         s.connect(('::1', 0))
     except socket.error:
         have_ipv6 = False
     return have_ipv6
diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index 78fc16bc7..0437ac373 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -1,800 +1,800 @@
 #!/usr/bin/env python3
 # Copyright (c) 2010 ArtForz -- public domain half-a-node
 # Copyright (c) 2012 Jeff Garzik
 # Copyright (c) 2010-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test objects for interacting with a bitcoind node over the p2p protocol.
 
 The P2PInterface objects interact with the bitcoind nodes under test using the
 node's p2p interface. They can be used to send messages to the node, and
 callbacks can be registered that execute when messages are received from the
 node. Messages are sent to/received from the node on an asyncio event loop.
 State held inside the objects must be guarded by the p2p_lock to avoid data
 races between the main testing thread and the event loop.
 
 P2PConnection: A low-level connection object to a node's P2P interface
 P2PInterface: A high-level interface object for communicating to a node over P2P
 P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
               and can respond correctly to getdata and getheaders messages
 P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps
               a count of how many times each txid has been announced."""
 
 import asyncio
-from collections import defaultdict
-from io import BytesIO
 import logging
 import struct
 import sys
 import threading
+from collections import defaultdict
+from io import BytesIO
 
 from test_framework.messages import (
-    CBlockHeader,
     MAX_HEADERS_RESULTS,
     MIN_VERSION_SUPPORTED,
+    MSG_BLOCK,
+    MSG_TX,
+    MSG_TYPE_MASK,
+    NODE_NETWORK,
+    CBlockHeader,
     msg_addr,
     msg_addrv2,
+    msg_avahello,
     msg_avapoll,
     msg_avaproof,
-    msg_tcpavaresponse,
-    msg_avahello,
     msg_block,
-    MSG_BLOCK,
     msg_blocktxn,
     msg_cfcheckpt,
     msg_cfheaders,
     msg_cfilter,
     msg_cmpctblock,
     msg_feefilter,
     msg_filteradd,
     msg_filterclear,
     msg_filterload,
     msg_getaddr,
     msg_getblocks,
     msg_getblocktxn,
     msg_getdata,
     msg_getheaders,
     msg_headers,
     msg_inv,
     msg_mempool,
     msg_merkleblock,
     msg_notfound,
     msg_ping,
     msg_pong,
     msg_sendaddrv2,
     msg_sendcmpct,
     msg_sendheaders,
+    msg_tcpavaresponse,
     msg_tx,
-    MSG_TX,
-    MSG_TYPE_MASK,
     msg_verack,
     msg_version,
-    NODE_NETWORK,
     sha256,
 )
 from test_framework.util import wait_until_helper
 
 logger = logging.getLogger("TestFramework.p2p")
 
 MESSAGEMAP = {
     b"addr": msg_addr,
     b"addrv2": msg_addrv2,
     b"avapoll": msg_avapoll,
     b"avaproof": msg_avaproof,
     b"avaresponse": msg_tcpavaresponse,
     b"avahello": msg_avahello,
     b"block": msg_block,
     b"blocktxn": msg_blocktxn,
     b"cfcheckpt": msg_cfcheckpt,
     b"cfheaders": msg_cfheaders,
     b"cfilter": msg_cfilter,
     b"cmpctblock": msg_cmpctblock,
     b"feefilter": msg_feefilter,
     b"filteradd": msg_filteradd,
     b"filterclear": msg_filterclear,
     b"filterload": msg_filterload,
     b"getaddr": msg_getaddr,
     b"getblocks": msg_getblocks,
     b"getblocktxn": msg_getblocktxn,
     b"getdata": msg_getdata,
     b"getheaders": msg_getheaders,
     b"headers": msg_headers,
     b"inv": msg_inv,
     b"mempool": msg_mempool,
     b"merkleblock": msg_merkleblock,
     b"notfound": msg_notfound,
     b"ping": msg_ping,
     b"pong": msg_pong,
     b"sendaddrv2": msg_sendaddrv2,
     b"sendcmpct": msg_sendcmpct,
     b"sendheaders": msg_sendheaders,
     b"tx": msg_tx,
     b"verack": msg_verack,
     b"version": msg_version,
 }
 
 MAGIC_BYTES = {
     "mainnet": b"\xe3\xe1\xf3\xe8",
     "testnet3": b"\xf4\xe5\xf3\xf4",
     "regtest": b"\xda\xb5\xbf\xfa",
 }
 
 
 class P2PConnection(asyncio.Protocol):
     """A low-level connection object to a node's P2P interface.
 
     This class is responsible for:
 
     - opening and closing the TCP connection to the node
     - reading bytes from and writing bytes to the socket
     - deserializing and serializing the P2P message header
     - logging messages as they are sent and received
 
     This class contains no logic for handling the P2P message payloads. It
     must be sub-classed and the on_message() callback overridden."""
 
     def __init__(self):
         # The underlying transport of the connection.
         # Should only call methods on this from the NetworkThread, c.f.
         # call_soon_threadsafe
         self._transport = None
 
     @property
     def is_connected(self):
         return self._transport is not None
 
     def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
         assert not self.is_connected
         self.timeout_factor = timeout_factor
         self.dstaddr = dstaddr
         self.dstport = dstport
         # The initial message to send after the connection was made:
         self.on_connection_send_msg = None
         self.on_connection_send_msg_is_raw = False
         self.recvbuf = b""
         self.magic_bytes = MAGIC_BYTES[net]
         logger.debug('Connecting to Bitcoin Node: {}:{}'.format(
             self.dstaddr, self.dstport))
 
         loop = NetworkThread.network_event_loop
         conn_gen_unsafe = loop.create_connection(
             lambda: self, host=self.dstaddr, port=self.dstport)
 
         def conn_gen(): return loop.call_soon_threadsafe(
             loop.create_task, conn_gen_unsafe)
         return conn_gen
 
     def peer_disconnect(self):
         # Connection could have already been closed by other end.
         NetworkThread.network_event_loop.call_soon_threadsafe(
             lambda: self._transport and self._transport.abort())
 
     # Connection and disconnection methods
 
     def connection_made(self, transport):
         """asyncio callback when a connection is opened."""
         assert not self._transport
         logger.debug("Connected & Listening: {}:{}".format(
             self.dstaddr, self.dstport))
         self._transport = transport
         if self.on_connection_send_msg:
             if self.on_connection_send_msg_is_raw:
                 self.send_raw_message(self.on_connection_send_msg)
             else:
                 self.send_message(self.on_connection_send_msg)
             # Never used again
             self.on_connection_send_msg = None
         self.on_open()
 
     def connection_lost(self, exc):
         """asyncio callback when a connection is closed."""
         if exc:
             logger.warning("Connection lost to {}:{} due to {}".format(
                 self.dstaddr, self.dstport, exc))
         else:
             logger.debug("Closed connection to: {}:{}".format(
                 self.dstaddr, self.dstport))
         self._transport = None
         self.recvbuf = b""
         self.on_close()
 
     # Socket read methods
 
     def data_received(self, t):
         """asyncio callback when data is read from the socket."""
         with p2p_lock:
             if len(t) > 0:
                 self.recvbuf += t
 
         while True:
             msg = self._on_data()
             if msg is None:
                 break
             self.on_message(msg)
 
     def _on_data(self):
         """Try to read P2P messages from the recv buffer.
 
         This method reads data from the buffer in a loop. It deserializes,
         parses and verifies the P2P header, then passes the P2P payload to
         the on_message callback for processing."""
         try:
             with p2p_lock:
                 if len(self.recvbuf) < 4:
                     return None
                 if self.recvbuf[:4] != self.magic_bytes:
                     raise ValueError(
                         "magic bytes mismatch: {} != {}".format(
                             repr(self.magic_bytes), repr(self.recvbuf)))
                 if len(self.recvbuf) < 4 + 12 + 4 + 4:
                     return None
                 msgtype = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0]
                 msglen = struct.unpack(
                     "<i", self.recvbuf[4 + 12:4 + 12 + 4])[0]
                 checksum = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + 4]
                 if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                     return None
                 msg = self.recvbuf[4 + 12 + 4 + 4:4 + 12 + 4 + 4 + msglen]
                 h = sha256(sha256(msg))
                 if checksum != h[:4]:
                     raise ValueError("got bad checksum " + repr(self.recvbuf))
                 self.recvbuf = self.recvbuf[4 + 12 + 4 + 4 + msglen:]
                 if msgtype not in MESSAGEMAP:
                     raise ValueError("Received unknown msgtype from {}:{}: '{}' {}".format(
                         self.dstaddr, self.dstport, msgtype, repr(msg)))
                 f = BytesIO(msg)
                 m = MESSAGEMAP[msgtype]()
                 m.deserialize(f)
                 self._log_message("receive", m)
                 return m
         except Exception as e:
             logger.exception('Error reading message: {}'.format(repr(e)))
             raise
 
     def on_message(self, message):
         """Callback for processing a P2P payload. Must be overridden by derived class."""
         raise NotImplementedError
 
     # Socket write methods
 
     def send_message(self, message):
         """Send a P2P message over the socket.
 
         This method takes a P2P payload, builds the P2P header and adds
         the message to the send buffer to be sent over the socket."""
         if not self.is_connected:
             raise IOError('Not connected')
         tmsg = self.build_message(message)
         self._log_message("send", message)
         return self.send_raw_message(tmsg)
 
     def send_raw_message(self, raw_message_bytes):
         """Send any raw message over the socket.
 
         This method adds a raw message to the send buffer to be sent over the
         socket."""
         if not self.is_connected:
             raise IOError('Not connected')
 
         def maybe_write():
             if not self._transport:
                 return
             if self._transport.is_closing():
                 return
             self._transport.write(raw_message_bytes)
         NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
 
     # Class utility methods
 
     def build_message(self, message):
         """Build a serialized P2P message"""
         msgtype = message.msgtype
         data = message.serialize()
         tmsg = self.magic_bytes
         tmsg += msgtype
         tmsg += b"\x00" * (12 - len(msgtype))
         tmsg += struct.pack("<I", len(data))
         th = sha256(data)
         h = sha256(th)
         tmsg += h[:4]
         tmsg += data
         return tmsg
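
     # Frame layout produced by build_message above:
     #   | magic (4) | msgtype (12, NUL-padded) | length (4, <I) | checksum (4) | payload |
     # where checksum = sha256(sha256(payload))[:4].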
 
     def _log_message(self, direction, msg):
         """Logs a message being sent or received over the connection."""
         if direction == "send":
             log_message = "Send message to "
         elif direction == "receive":
             log_message = "Received message from "
         log_message += "{}:{}: {}".format(
             self.dstaddr, self.dstport, repr(msg)[:500])
         if len(log_message) > 500:
             log_message += "... (msg truncated)"
         logger.debug(log_message)
 
 
 class P2PInterface(P2PConnection):
     """A high-level P2P interface class for communicating with a Bitcoin Cash node.
 
     This class provides high-level callbacks for processing P2P message
     payloads, as well as convenience methods for interacting with the
     node over P2P.
 
     Individual testcases should subclass this and override the on_* methods
     if they want to alter message handling behaviour."""
 
     def __init__(self, support_addrv2=False):
         super().__init__()
 
         # Track number of messages of each type received.
         # Should be read-only in a test.
         self.message_count = defaultdict(int)
 
         # Track the most recent message of each type.
         # To wait for a message to be received, pop that message from
         # this and use self.wait_until.
         self.last_message = {}
 
         # A count of the number of ping messages we've sent to the node
         self.ping_counter = 1
 
         # The network services received from the peer
         self.nServices = 0
 
         self.support_addrv2 = support_addrv2
 
     def peer_connect(self, *args, services=NODE_NETWORK,
                      send_version=True, **kwargs):
         create_conn = super().peer_connect(*args, **kwargs)
 
         if send_version:
             # Send a version msg
             vt = msg_version()
             vt.nServices = services
             vt.addrTo.ip = self.dstaddr
             vt.addrTo.port = self.dstport
             vt.addrFrom.ip = "0.0.0.0"
             vt.addrFrom.port = 0
 
             # Will be sent soon after connection_made
             self.on_connection_send_msg = vt
 
         return create_conn
 
     # Message receiving methods
 
     def on_message(self, message):
         """Receive message and dispatch message to appropriate callback.
 
         We keep a count of how many of each message type has been received
         and the most recent message of each type."""
         with p2p_lock:
             try:
                 msgtype = message.msgtype.decode('ascii')
                 self.message_count[msgtype] += 1
                 self.last_message[msgtype] = message
                 getattr(self, 'on_' + msgtype)(message)
             except Exception:
                 print("ERROR delivering {} ({})".format(
                     repr(message), sys.exc_info()[0]))
                 raise
 
     # Callback methods. Can be overridden by subclasses in individual test
     # cases to provide custom message handling behaviour.
 
     def on_open(self):
         pass
 
     def on_close(self):
         pass
 
     def on_addr(self, message): pass
 
     def on_addrv2(self, message): pass
 
     def on_avapoll(self, message): pass
 
     def on_avaproof(self, message): pass
 
     def on_avaresponse(self, message): pass
 
     def on_avahello(self, message): pass
 
     def on_block(self, message): pass
 
     def on_blocktxn(self, message): pass
 
     def on_cfcheckpt(self, message): pass
 
     def on_cfheaders(self, message): pass
 
     def on_cfilter(self, message): pass
 
     def on_cmpctblock(self, message): pass
 
     def on_feefilter(self, message): pass
 
     def on_filteradd(self, message): pass
 
     def on_filterclear(self, message): pass
 
     def on_filterload(self, message): pass
 
     def on_getaddr(self, message): pass
 
     def on_getblocks(self, message): pass
 
     def on_getblocktxn(self, message): pass
 
     def on_getdata(self, message): pass
 
     def on_getheaders(self, message): pass
 
     def on_headers(self, message): pass
 
     def on_mempool(self, message): pass
 
     def on_merkleblock(self, message): pass
 
     def on_notfound(self, message): pass
 
     def on_pong(self, message): pass
 
     def on_sendaddrv2(self, message): pass
 
     def on_sendcmpct(self, message): pass
 
     def on_sendheaders(self, message): pass
 
     def on_tx(self, message): pass
 
     def on_inv(self, message):
         want = msg_getdata()
         for i in message.inv:
             if i.type != 0:
                 want.inv.append(i)
         if len(want.inv):
             self.send_message(want)
 
     def on_ping(self, message):
         self.send_message(msg_pong(message.nonce))
 
     def on_verack(self, message):
         pass
 
     def on_version(self, message):
         assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(
             message.nVersion, MIN_VERSION_SUPPORTED)
         self.send_message(msg_verack())
         if self.support_addrv2:
             self.send_message(msg_sendaddrv2())
         self.nServices = message.nServices
 
     # Connection helper methods
 
     def wait_until(self, test_function_in, *, timeout=60,
                    check_connected=True):
         def test_function():
             if check_connected:
                 assert self.is_connected
             return test_function_in()
 
         wait_until_helper(test_function, timeout=timeout, lock=p2p_lock,
                           timeout_factor=self.timeout_factor)
 
     def wait_for_disconnect(self, timeout=60):
         def test_function(): return not self.is_connected
         self.wait_until(test_function, timeout=timeout, check_connected=False)
 
     # Message receiving helper methods
 
     def wait_for_tx(self, txid, timeout=60):
         def test_function():
             if not self.last_message.get('tx'):
                 return False
             return self.last_message['tx'].tx.rehash() == txid
 
         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_block(self, blockhash, timeout=60):
         def test_function():
             return self.last_message.get(
                 "block") and self.last_message["block"].block.rehash() == blockhash
 
         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_header(self, blockhash, timeout=60):
         def test_function():
             last_headers = self.last_message.get('headers')
             if not last_headers:
                 return False
             return last_headers.headers[0].rehash() == int(blockhash, 16)
 
         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_merkleblock(self, blockhash, timeout=60):
         def test_function():
             last_filtered_block = self.last_message.get('merkleblock')
             if not last_filtered_block:
                 return False
             return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)
 
         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_getdata(self, hash_list, timeout=60):
         """Waits for a getdata message.
 
         The object hashes in the inventory vector must match the provided hash_list."""
         def test_function():
             last_data = self.last_message.get("getdata")
             if not last_data:
                 return False
             return [x.hash for x in last_data.inv] == hash_list
 
         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_getheaders(self, timeout=60):
         """Waits for a getheaders message.
 
         Receiving any getheaders message will satisfy the predicate. The
         last_message["getheaders"] value must be explicitly cleared before
         calling this method, or this will return immediately with success.
         TODO: change this method to take a hash value and only return true if
         the correct block header has been requested."""
         def test_function():
             return self.last_message.get("getheaders")
 
         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_inv(self, expected_inv, timeout=60):
         """Waits for an INV message and checks that the first inv object in the message was as expected."""
         if len(expected_inv) > 1:
             raise NotImplementedError(
                 "wait_for_inv() will only verify the first inv object")
 
         def test_function():
             return self.last_message.get("inv") and \
                 self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                 self.last_message["inv"].inv[0].hash == expected_inv[0].hash
 
         self.wait_until(test_function, timeout=timeout)
 
     def wait_for_verack(self, timeout=60):
         def test_function():
             return "verack" in self.last_message
 
         self.wait_until(test_function, timeout=timeout)
 
     # Message sending helper functions
 
     def send_and_ping(self, message, timeout=60):
         self.send_message(message)
         self.sync_with_ping(timeout=timeout)
 
     # Sync up with the node
     def sync_with_ping(self, timeout=60):
         self.send_message(msg_ping(nonce=self.ping_counter))
 
         def test_function():
             return self.last_message.get(
                 "pong") and self.last_message["pong"].nonce == self.ping_counter
 
         self.wait_until(test_function, timeout=timeout)
         self.ping_counter += 1
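
 # Illustrative sketch of the intended extension pattern: subclass
 # P2PInterface and override individual on_* callbacks (P2PDataStore and
 # P2PTxInvStore below do exactly this). 'CountingPeer' is a hypothetical
 # example:
 #
 #   class CountingPeer(P2PInterface):
 #       def on_headers(self, message):
 #           self.headers_seen = getattr(self, 'headers_seen', 0) + 1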
 
 
 # One lock for synchronizing all data access between the networking thread (see
 # NetworkThread below) and the thread running the test logic.  For simplicity,
 # P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
 # This lock should be acquired in the thread running the test logic to synchronize
 # access to any data shared with the P2PInterface or P2PConnection.
 p2p_lock = threading.Lock()
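
 # Illustrative sketch: test logic reading state shared with the event loop
 # should hold the lock, e.g. with an assumed connected P2PInterface 'peer':
 #
 #   with p2p_lock:
 #       inv_count = peer.message_count["inv"]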
 
 
 class NetworkThread(threading.Thread):
     network_event_loop = None
 
     def __init__(self):
         super().__init__(name="NetworkThread")
         # There is only one event loop, and at most one NetworkThread may be
         # created.
         assert not self.network_event_loop
 
         NetworkThread.network_event_loop = asyncio.new_event_loop()
 
     def run(self):
         """Start the network thread."""
         self.network_event_loop.run_forever()
 
     def close(self, timeout=10):
         """Close the connections and network event loop."""
         self.network_event_loop.call_soon_threadsafe(
             self.network_event_loop.stop)
         wait_until_helper(lambda: not self.network_event_loop.is_running(),
                           timeout=timeout)
         self.network_event_loop.close()
         self.join(timeout)
         # Safe to remove event loop.
         NetworkThread.network_event_loop = None
 
 
 class P2PDataStore(P2PInterface):
     """A P2P data store class.
 
     Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
 
     def __init__(self):
         super().__init__()
         # store of blocks. key is block hash, value is a CBlock object
         self.block_store = {}
         self.last_block_hash = ''
         # store of txs. key is txid, value is a CTransaction object
         self.tx_store = {}
         self.getdata_requests = []
 
     def on_getdata(self, message):
         """Check for the tx/block in our stores and if found, reply with an inv message."""
         for inv in message.inv:
             self.getdata_requests.append(inv.hash)
             if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
                 self.send_message(msg_tx(self.tx_store[inv.hash]))
             elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
                 self.send_message(msg_block(self.block_store[inv.hash]))
             else:
                 logger.debug(
                     'getdata message type {} received.'.format(hex(inv.type)))
 
     def on_getheaders(self, message):
         """Search back through our block store for the locator, and reply with a headers message if found."""
 
         locator, hash_stop = message.locator, message.hashstop
 
         # Assume that the most recent block added is the tip
         if not self.block_store:
             return
 
         headers_list = [self.block_store[self.last_block_hash]]
         while headers_list[-1].sha256 not in locator.vHave:
             # Walk back through the block store, adding headers to headers_list
             # as we go.
             prev_block_hash = headers_list[-1].hashPrevBlock
             if prev_block_hash in self.block_store:
                 prev_block_header = CBlockHeader(
                     self.block_store[prev_block_hash])
                 headers_list.append(prev_block_header)
                 if prev_block_header.sha256 == hash_stop:
                     # if this is the hashstop header, stop here
                     break
             else:
                 logger.debug('block hash {} not found in block store'.format(
                     hex(prev_block_hash)))
                 break
 
         # Truncate the list if there are too many headers
         headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
         response = msg_headers(headers_list)
 
         if response is not None:
             self.send_message(response)
 
     def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False,
                              reject_reason=None, expect_disconnect=False, timeout=60):
         """Send blocks to test node and test whether the tip advances.
 
          - add all blocks to our block_store
          - send a headers message for the final block
          - the on_getheaders handler will ensure that any getheaders are responded to
          - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler
            will ensure that any getdata messages are responded to. If force_send is True, send the
            full blocks unsolicited instead.
          - if success is True: assert that the node's tip advances to the most recent block
          - if success is False: assert that the node's tip doesn't advance
          - if reject_reason is set: assert that the correct reject message is logged"""
 
         with p2p_lock:
             for block in blocks:
                 self.block_store[block.sha256] = block
                 self.last_block_hash = block.sha256
 
         def test():
             if force_send:
                 for b in blocks:
                     self.send_message(msg_block(block=b))
 
             else:
                 self.send_message(
                     msg_headers([CBlockHeader(block) for block in blocks]))
                 self.wait_until(
                     lambda: blocks[-1].sha256 in self.getdata_requests,
                     timeout=timeout,
                     check_connected=success,
                 )
 
             if expect_disconnect:
                 self.wait_for_disconnect(timeout=timeout)
             else:
                 self.sync_with_ping(timeout=timeout)
 
             if success:
                 self.wait_until(lambda: node.getbestblockhash() ==
                                 blocks[-1].hash, timeout=timeout)
             else:
                 assert node.getbestblockhash() != blocks[-1].hash
 
         if reject_reason:
             with node.assert_debug_log(expected_msgs=[reject_reason]):
                 test()
         else:
             test()
 
     def send_txs_and_test(self, txs, node, *, success=True,
                           expect_disconnect=False, reject_reason=None):
         """Send txs to test node and test whether they're accepted to the mempool.
 
          - add all txs to our tx_store
          - send tx messages for all txs
          - if success is True/False: assert that the txs are/are not accepted to the mempool
          - if expect_disconnect is True: Skip the sync with ping
          - if reject_reason is set: assert that the correct reject message is logged."""
 
         with p2p_lock:
             for tx in txs:
                 self.tx_store[tx.sha256] = tx
 
         def test():
             for tx in txs:
                 self.send_message(msg_tx(tx))
 
             if expect_disconnect:
                 self.wait_for_disconnect()
             else:
                 self.sync_with_ping()
 
             raw_mempool = node.getrawmempool()
             if success:
                 # Check that all txs are now in the mempool
                 for tx in txs:
                     assert tx.hash in raw_mempool, "{} not found in mempool".format(
                         tx.hash)
             else:
                 # Check that none of the txs are now in the mempool
                 for tx in txs:
                     assert tx.hash not in raw_mempool, "{} tx found in mempool".format(
                         tx.hash)
 
         if reject_reason:
             with node.assert_debug_log(expected_msgs=[reject_reason]):
                 test()
         else:
             test()
 
 
 class P2PTxInvStore(P2PInterface):
     """A P2PInterface which stores a count of how many times each txid has been announced."""
 
     def __init__(self):
         super().__init__()
         self.tx_invs_received = defaultdict(int)
 
     def on_inv(self, message):
         # Send getdata in response.
         super().on_inv(message)
         # Store how many times invs have been received for each tx.
         for i in message.inv:
             if i.type == MSG_TX:
                 # save txid
                 self.tx_invs_received[i.hash] += 1
 
     def get_invs(self):
         with p2p_lock:
             return list(self.tx_invs_received.keys())
 
     def wait_for_broadcast(self, txns, timeout=60):
         """Waits for the txns (list of txids) to complete initial broadcast.
         The mempool should mark unbroadcast=False for these transactions.
         """
         # Wait until invs have been received (and getdatas sent) for each txid.
         self.wait_until(lambda: set(self.tx_invs_received.keys()) == set(
             [int(tx, 16) for tx in txns]), timeout=timeout)
         # Flush messages and wait for the getdatas to be processed
         self.sync_with_ping()
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
index 57d5a7a2e..1f664830e 100644
--- a/test/functional/test_framework/script.py
+++ b/test/functional/test_framework/script.py
@@ -1,792 +1,791 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Functionality to build scripts, as well as SignatureHash().
 
 This file is modified from python-bitcoinlib.
 """
 
 import hashlib
 import struct
 import unittest
-from typing import List, Dict
+from typing import Dict, List
 
 from .messages import (
     CTransaction,
     CTxOut,
     hash256,
     ser_string,
     ser_uint256,
     sha256,
     uint256_from_str,
 )
 
-
 MAX_SCRIPT_ELEMENT_SIZE = 520
 OPCODE_NAMES: Dict["CScriptOp", str] = {}
 
 
 def hash160(s):
     return hashlib.new('ripemd160', sha256(s)).digest()
 
 
 def bn2vch(v):
     """Convert number to bitcoin-specific little endian format."""
     # We need v.bit_length() bits, plus a sign bit for every nonzero number.
     n_bits = v.bit_length() + (v != 0)
     # The number of bytes for that is:
     n_bytes = (n_bits + 7) // 8
     # Convert number to absolute value + sign in top bit.
     encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
     # Serialize to bytes
     return encoded_v.to_bytes(n_bytes, 'little')
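
 # Worked example: for v = -0x80, |v| needs 8 bits plus a sign bit (9 bits ->
 # 2 bytes), so encoded_v = 0x80 | 0x8000 = 0x8080, which serializes
 # little-endian to b'\x80\x80' (cf. test_bn2vch below).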
 
 
 _opcode_instances: List["CScriptOp"] = []
 
 
 class CScriptOp(int):
     """A single script opcode"""
     __slots__ = ()
 
     @staticmethod
     def encode_op_pushdata(d):
         """Encode a PUSHDATA op, returning bytes"""
         if len(d) < 0x4c:
             # OP_PUSHDATA
             return bytes([len(d)]) + d
         elif len(d) <= 0xff:
             # OP_PUSHDATA1
             return b'\x4c' + bytes([len(d)]) + d
         elif len(d) <= 0xffff:
             # OP_PUSHDATA2
             return b'\x4d' + struct.pack(b'<H', len(d)) + d
         elif len(d) <= 0xffffffff:
             # OP_PUSHDATA4
             return b'\x4e' + struct.pack(b'<I', len(d)) + d
         else:
             raise ValueError("Data too long to encode in a PUSHDATA op")
 
     @staticmethod
     def encode_op_n(n):
         """Encode a small integer op, returning an opcode"""
         if not (0 <= n <= 16):
             raise ValueError(
                 'Integer must be in range 0 <= n <= 16, got {}'.format(n))
 
         if n == 0:
             return OP_0
         else:
             return CScriptOp(OP_1 + n - 1)
 
     def decode_op_n(self):
         """Decode a small integer opcode, returning an integer"""
         if self == OP_0:
             return 0
 
         if not (self == OP_0 or OP_1 <= self <= OP_16):
             raise ValueError('op {!r} is not an OP_N'.format(self))
 
         return int(self - OP_1 + 1)
 
     def is_small_int(self):
         """Return true if the op pushes a small integer to the stack"""
         if 0x51 <= self <= 0x60 or self == 0:
             return True
         else:
             return False
 
     def __str__(self):
         return repr(self)
 
     def __repr__(self):
         if self in OPCODE_NAMES:
             return OPCODE_NAMES[self]
         else:
             return 'CScriptOp(0x{:x})'.format(self)
 
     def __new__(cls, n):
         try:
             return _opcode_instances[n]
         except IndexError:
             assert len(_opcode_instances) == n
             _opcode_instances.append(super().__new__(cls, n))
             return _opcode_instances[n]
 
 
 # Populate opcode instance table
 for n in range(0xff + 1):
     CScriptOp(n)
 
 
 # push value
 OP_0 = CScriptOp(0x00)
 OP_FALSE = OP_0
 OP_PUSHDATA1 = CScriptOp(0x4c)
 OP_PUSHDATA2 = CScriptOp(0x4d)
 OP_PUSHDATA4 = CScriptOp(0x4e)
 OP_1NEGATE = CScriptOp(0x4f)
 OP_RESERVED = CScriptOp(0x50)
 OP_1 = CScriptOp(0x51)
 OP_TRUE = OP_1
 OP_2 = CScriptOp(0x52)
 OP_3 = CScriptOp(0x53)
 OP_4 = CScriptOp(0x54)
 OP_5 = CScriptOp(0x55)
 OP_6 = CScriptOp(0x56)
 OP_7 = CScriptOp(0x57)
 OP_8 = CScriptOp(0x58)
 OP_9 = CScriptOp(0x59)
 OP_10 = CScriptOp(0x5a)
 OP_11 = CScriptOp(0x5b)
 OP_12 = CScriptOp(0x5c)
 OP_13 = CScriptOp(0x5d)
 OP_14 = CScriptOp(0x5e)
 OP_15 = CScriptOp(0x5f)
 OP_16 = CScriptOp(0x60)
 
 # control
 OP_NOP = CScriptOp(0x61)
 OP_VER = CScriptOp(0x62)
 OP_IF = CScriptOp(0x63)
 OP_NOTIF = CScriptOp(0x64)
 OP_VERIF = CScriptOp(0x65)
 OP_VERNOTIF = CScriptOp(0x66)
 OP_ELSE = CScriptOp(0x67)
 OP_ENDIF = CScriptOp(0x68)
 OP_VERIFY = CScriptOp(0x69)
 OP_RETURN = CScriptOp(0x6a)
 
 # stack ops
 OP_TOALTSTACK = CScriptOp(0x6b)
 OP_FROMALTSTACK = CScriptOp(0x6c)
 OP_2DROP = CScriptOp(0x6d)
 OP_2DUP = CScriptOp(0x6e)
 OP_3DUP = CScriptOp(0x6f)
 OP_2OVER = CScriptOp(0x70)
 OP_2ROT = CScriptOp(0x71)
 OP_2SWAP = CScriptOp(0x72)
 OP_IFDUP = CScriptOp(0x73)
 OP_DEPTH = CScriptOp(0x74)
 OP_DROP = CScriptOp(0x75)
 OP_DUP = CScriptOp(0x76)
 OP_NIP = CScriptOp(0x77)
 OP_OVER = CScriptOp(0x78)
 OP_PICK = CScriptOp(0x79)
 OP_ROLL = CScriptOp(0x7a)
 OP_ROT = CScriptOp(0x7b)
 OP_SWAP = CScriptOp(0x7c)
 OP_TUCK = CScriptOp(0x7d)
 
 # splice ops
 OP_CAT = CScriptOp(0x7e)
 OP_SPLIT = CScriptOp(0x7f)
 OP_NUM2BIN = CScriptOp(0x80)
 OP_BIN2NUM = CScriptOp(0x81)
 OP_SIZE = CScriptOp(0x82)
 
 # bit logic
 OP_INVERT = CScriptOp(0x83)
 OP_AND = CScriptOp(0x84)
 OP_OR = CScriptOp(0x85)
 OP_XOR = CScriptOp(0x86)
 OP_EQUAL = CScriptOp(0x87)
 OP_EQUALVERIFY = CScriptOp(0x88)
 OP_RESERVED1 = CScriptOp(0x89)
 OP_RESERVED2 = CScriptOp(0x8a)
 
 # numeric
 OP_1ADD = CScriptOp(0x8b)
 OP_1SUB = CScriptOp(0x8c)
 OP_2MUL = CScriptOp(0x8d)
 OP_2DIV = CScriptOp(0x8e)
 OP_NEGATE = CScriptOp(0x8f)
 OP_ABS = CScriptOp(0x90)
 OP_NOT = CScriptOp(0x91)
 OP_0NOTEQUAL = CScriptOp(0x92)
 
 OP_ADD = CScriptOp(0x93)
 OP_SUB = CScriptOp(0x94)
 OP_MUL = CScriptOp(0x95)
 OP_DIV = CScriptOp(0x96)
 OP_MOD = CScriptOp(0x97)
 OP_LSHIFT = CScriptOp(0x98)
 OP_RSHIFT = CScriptOp(0x99)
 
 OP_BOOLAND = CScriptOp(0x9a)
 OP_BOOLOR = CScriptOp(0x9b)
 OP_NUMEQUAL = CScriptOp(0x9c)
 OP_NUMEQUALVERIFY = CScriptOp(0x9d)
 OP_NUMNOTEQUAL = CScriptOp(0x9e)
 OP_LESSTHAN = CScriptOp(0x9f)
 OP_GREATERTHAN = CScriptOp(0xa0)
 OP_LESSTHANOREQUAL = CScriptOp(0xa1)
 OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
 OP_MIN = CScriptOp(0xa3)
 OP_MAX = CScriptOp(0xa4)
 
 OP_WITHIN = CScriptOp(0xa5)
 
 # crypto
 OP_RIPEMD160 = CScriptOp(0xa6)
 OP_SHA1 = CScriptOp(0xa7)
 OP_SHA256 = CScriptOp(0xa8)
 OP_HASH160 = CScriptOp(0xa9)
 OP_HASH256 = CScriptOp(0xaa)
 OP_CODESEPARATOR = CScriptOp(0xab)
 OP_CHECKSIG = CScriptOp(0xac)
 OP_CHECKSIGVERIFY = CScriptOp(0xad)
 OP_CHECKMULTISIG = CScriptOp(0xae)
 OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
 
 # expansion
 OP_NOP1 = CScriptOp(0xb0)
 OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
 OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
 OP_NOP4 = CScriptOp(0xb3)
 OP_NOP5 = CScriptOp(0xb4)
 OP_NOP6 = CScriptOp(0xb5)
 OP_NOP7 = CScriptOp(0xb6)
 OP_NOP8 = CScriptOp(0xb7)
 OP_NOP9 = CScriptOp(0xb8)
 OP_NOP10 = CScriptOp(0xb9)
 
 # more crypto
 OP_CHECKDATASIG = CScriptOp(0xba)
 OP_CHECKDATASIGVERIFY = CScriptOp(0xbb)
 
 # additional byte string operations
 OP_REVERSEBYTES = CScriptOp(0xbc)
 
 # multi-byte opcodes
 OP_PREFIX_BEGIN = CScriptOp(0xf0)
 OP_PREFIX_END = CScriptOp(0xf7)
 
 # template matching params
 OP_SMALLINTEGER = CScriptOp(0xfa)
 OP_PUBKEYS = CScriptOp(0xfb)
 OP_PUBKEYHASH = CScriptOp(0xfd)
 OP_PUBKEY = CScriptOp(0xfe)
 
 OP_INVALIDOPCODE = CScriptOp(0xff)
 
 OPCODE_NAMES.update({
     OP_0: 'OP_0',
     OP_PUSHDATA1: 'OP_PUSHDATA1',
     OP_PUSHDATA2: 'OP_PUSHDATA2',
     OP_PUSHDATA4: 'OP_PUSHDATA4',
     OP_1NEGATE: 'OP_1NEGATE',
     OP_RESERVED: 'OP_RESERVED',
     OP_1: 'OP_1',
     OP_2: 'OP_2',
     OP_3: 'OP_3',
     OP_4: 'OP_4',
     OP_5: 'OP_5',
     OP_6: 'OP_6',
     OP_7: 'OP_7',
     OP_8: 'OP_8',
     OP_9: 'OP_9',
     OP_10: 'OP_10',
     OP_11: 'OP_11',
     OP_12: 'OP_12',
     OP_13: 'OP_13',
     OP_14: 'OP_14',
     OP_15: 'OP_15',
     OP_16: 'OP_16',
     OP_NOP: 'OP_NOP',
     OP_VER: 'OP_VER',
     OP_IF: 'OP_IF',
     OP_NOTIF: 'OP_NOTIF',
     OP_VERIF: 'OP_VERIF',
     OP_VERNOTIF: 'OP_VERNOTIF',
     OP_ELSE: 'OP_ELSE',
     OP_ENDIF: 'OP_ENDIF',
     OP_VERIFY: 'OP_VERIFY',
     OP_RETURN: 'OP_RETURN',
     OP_TOALTSTACK: 'OP_TOALTSTACK',
     OP_FROMALTSTACK: 'OP_FROMALTSTACK',
     OP_2DROP: 'OP_2DROP',
     OP_2DUP: 'OP_2DUP',
     OP_3DUP: 'OP_3DUP',
     OP_2OVER: 'OP_2OVER',
     OP_2ROT: 'OP_2ROT',
     OP_2SWAP: 'OP_2SWAP',
     OP_IFDUP: 'OP_IFDUP',
     OP_DEPTH: 'OP_DEPTH',
     OP_DROP: 'OP_DROP',
     OP_DUP: 'OP_DUP',
     OP_NIP: 'OP_NIP',
     OP_OVER: 'OP_OVER',
     OP_PICK: 'OP_PICK',
     OP_ROLL: 'OP_ROLL',
     OP_ROT: 'OP_ROT',
     OP_SWAP: 'OP_SWAP',
     OP_TUCK: 'OP_TUCK',
     OP_CAT: 'OP_CAT',
     OP_SPLIT: 'OP_SPLIT',
     OP_NUM2BIN: 'OP_NUM2BIN',
     OP_BIN2NUM: 'OP_BIN2NUM',
     OP_SIZE: 'OP_SIZE',
     OP_INVERT: 'OP_INVERT',
     OP_AND: 'OP_AND',
     OP_OR: 'OP_OR',
     OP_XOR: 'OP_XOR',
     OP_EQUAL: 'OP_EQUAL',
     OP_EQUALVERIFY: 'OP_EQUALVERIFY',
     OP_RESERVED1: 'OP_RESERVED1',
     OP_RESERVED2: 'OP_RESERVED2',
     OP_1ADD: 'OP_1ADD',
     OP_1SUB: 'OP_1SUB',
     OP_2MUL: 'OP_2MUL',
     OP_2DIV: 'OP_2DIV',
     OP_NEGATE: 'OP_NEGATE',
     OP_ABS: 'OP_ABS',
     OP_NOT: 'OP_NOT',
     OP_0NOTEQUAL: 'OP_0NOTEQUAL',
     OP_ADD: 'OP_ADD',
     OP_SUB: 'OP_SUB',
     OP_MUL: 'OP_MUL',
     OP_DIV: 'OP_DIV',
     OP_MOD: 'OP_MOD',
     OP_LSHIFT: 'OP_LSHIFT',
     OP_RSHIFT: 'OP_RSHIFT',
     OP_BOOLAND: 'OP_BOOLAND',
     OP_BOOLOR: 'OP_BOOLOR',
     OP_NUMEQUAL: 'OP_NUMEQUAL',
     OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
     OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
     OP_LESSTHAN: 'OP_LESSTHAN',
     OP_GREATERTHAN: 'OP_GREATERTHAN',
     OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
     OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
     OP_MIN: 'OP_MIN',
     OP_MAX: 'OP_MAX',
     OP_WITHIN: 'OP_WITHIN',
     OP_RIPEMD160: 'OP_RIPEMD160',
     OP_SHA1: 'OP_SHA1',
     OP_SHA256: 'OP_SHA256',
     OP_HASH160: 'OP_HASH160',
     OP_HASH256: 'OP_HASH256',
     OP_CODESEPARATOR: 'OP_CODESEPARATOR',
     OP_CHECKSIG: 'OP_CHECKSIG',
     OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
     OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
     OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
     OP_CHECKDATASIG: 'OP_CHECKDATASIG',
     OP_CHECKDATASIGVERIFY: 'OP_CHECKDATASIGVERIFY',
     OP_NOP1: 'OP_NOP1',
     OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
     OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
     OP_NOP4: 'OP_NOP4',
     OP_NOP5: 'OP_NOP5',
     OP_NOP6: 'OP_NOP6',
     OP_NOP7: 'OP_NOP7',
     OP_NOP8: 'OP_NOP8',
     OP_NOP9: 'OP_NOP9',
     OP_NOP10: 'OP_NOP10',
     OP_SMALLINTEGER: 'OP_SMALLINTEGER',
     OP_PUBKEYS: 'OP_PUBKEYS',
     OP_PUBKEYHASH: 'OP_PUBKEYHASH',
     OP_PUBKEY: 'OP_PUBKEY',
     OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
 })
 
 
 class CScriptInvalidError(Exception):
     """Base class for CScript exceptions"""
     pass
 
 
 class CScriptTruncatedPushDataError(CScriptInvalidError):
     """Invalid pushdata due to truncation"""
 
     def __init__(self, msg, data):
         self.data = data
         super().__init__(msg)
 
 
 # This is used, e.g., for blockchain heights in coinbase scripts (BIP34)
 class CScriptNum:
     __slots__ = ("value",)
 
     def __init__(self, d=0):
         self.value = d
 
     @staticmethod
     def encode(obj):
         r = bytearray(0)
         if obj.value == 0:
             return bytes(r)
         neg = obj.value < 0
         absvalue = -obj.value if neg else obj.value
         while (absvalue):
             r.append(absvalue & 0xff)
             absvalue >>= 8
         if r[-1] & 0x80:
             r.append(0x80 if neg else 0)
         elif neg:
             r[-1] |= 0x80
         return bytes([len(r)]) + r
 
     @staticmethod
     def decode(vch):
         result = 0
         # We assume valid push_size and minimal encoding
         value = vch[1:]
         if len(value) == 0:
             return result
         for i, byte in enumerate(value):
             result |= int(byte) << 8 * i
         if value[-1] >= 0x80:
             # Mask for all but the highest result bit
             num_mask = (2**(len(value) * 8) - 1) >> 1
             result &= num_mask
             result *= -1
         return result
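
 # Worked example: CScriptNum.encode(CScriptNum(-255)) emits the push size
 # followed by sign-magnitude little-endian bytes; |-255| = 0xff already has
 # its top bit set, so a 0x80 sign byte is appended, giving b'\x02\xff\x80'.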
 
 
 class CScript(bytes):
     """Serialized script
 
     A bytes subclass, so you can use this directly whenever bytes are accepted.
     Note that this means that indexing does *not* work - you'll index by byte
     rather than by opcode. This format was chosen for efficiency so that the
     general case would not require creating a lot of little CScriptOp objects.

     iter(script), however, does iterate by opcode.
     """
     __slots__ = ()
 
     @classmethod
     def __coerce_instance(cls, other):
         # Coerce other into bytes
         if isinstance(other, CScriptOp):
             other = bytes([other])
         elif isinstance(other, CScriptNum):
             if (other.value == 0):
                 other = bytes([CScriptOp(OP_0)])
             else:
                 other = CScriptNum.encode(other)
         elif isinstance(other, int):
             if 0 <= other <= 16:
                 other = bytes([CScriptOp.encode_op_n(other)])
             elif other == -1:
                 other = bytes([OP_1NEGATE])
             else:
                 other = CScriptOp.encode_op_pushdata(bn2vch(other))
         elif isinstance(other, (bytes, bytearray)):
             other = CScriptOp.encode_op_pushdata(other)
         return other
 
     def __add__(self, other):
         # add makes no sense for a CScript()
         raise NotImplementedError
 
     def join(self, iterable):
         # join makes no sense for a CScript()
         raise NotImplementedError
 
     def __new__(cls, value=b''):
         if isinstance(value, bytes) or isinstance(value, bytearray):
             return super().__new__(cls, value)
         else:
             def coerce_iterable(iterable):
                 for instance in iterable:
                     yield cls.__coerce_instance(instance)
             # Annoyingly on both python2 and python3 bytes.join() always
             # returns a bytes instance even when subclassed.
             return super().__new__(
                 cls, b''.join(coerce_iterable(value)))
 
     def raw_iter(self):
         """Raw iteration
 
         Yields tuples of (opcode, data, sop_idx) so that the different possible
         PUSHDATA encodings can be accurately distinguished, and so that the
         exact byte index (sop_idx) of every opcode is known.
         """
         i = 0
         while i < len(self):
             sop_idx = i
             opcode = self[i]
             i += 1
 
             if opcode > OP_PUSHDATA4:
                 yield (opcode, None, sop_idx)
             else:
                 datasize = None
                 pushdata_type = None
                 if opcode < OP_PUSHDATA1:
                     pushdata_type = 'PUSHDATA({})'.format(opcode)
                     datasize = opcode
 
                 elif opcode == OP_PUSHDATA1:
                     pushdata_type = 'PUSHDATA1'
                     if i >= len(self):
                         raise CScriptInvalidError(
                             'PUSHDATA1: missing data length')
                     datasize = self[i]
                     i += 1
 
                 elif opcode == OP_PUSHDATA2:
                     pushdata_type = 'PUSHDATA2'
                     if i + 1 >= len(self):
                         raise CScriptInvalidError(
                             'PUSHDATA2: missing data length')
                     datasize = self[i] + (self[i + 1] << 8)
                     i += 2
 
                 elif opcode == OP_PUSHDATA4:
                     pushdata_type = 'PUSHDATA4'
                     if i + 3 >= len(self):
                         raise CScriptInvalidError(
                             'PUSHDATA4: missing data length')
                     datasize = self[i] + (self[i + 1] << 8) + \
                         (self[i + 2] << 16) + (self[i + 3] << 24)
                     i += 4
 
                 else:
                     assert False  # shouldn't happen
 
                 data = bytes(self[i:i + datasize])
 
                 # Check for truncation
                 if len(data) < datasize:
                     raise CScriptTruncatedPushDataError(
                         '{}: truncated data'.format(pushdata_type), data)
 
                 i += datasize
 
                 yield (opcode, data, sop_idx)
 
     def __iter__(self):
         """'Cooked' iteration
 
         Returns either a CScriptOp instance, an integer, or bytes, as
         appropriate.
 
         See raw_iter() if you need to distinguish the different possible
         PUSHDATA encodings.
         """
         for (opcode, data, sop_idx) in self.raw_iter():
             if data is not None:
                 yield data
             else:
                 opcode = CScriptOp(opcode)
 
                 if opcode.is_small_int():
                     yield opcode.decode_op_n()
                 else:
                     yield opcode
 
     def __repr__(self):
         def _repr(o):
             if isinstance(o, bytes):
                 return "x('{}')".format(o.hex())
             else:
                 return repr(o)
 
         ops = []
         i = iter(self)
         while True:
             op = None
             try:
                 op = _repr(next(i))
             except CScriptTruncatedPushDataError as err:
                 op = '{}...<ERROR: {}>'.format(_repr(err.data), err)
                 break
             except CScriptInvalidError as err:
                 op = '<ERROR: {}>'.format(err)
                 break
             except StopIteration:
                 break
             finally:
                 if op is not None:
                     ops.append(op)
 
         return "CScript([{}])".format(', '.join(ops))
 
     def GetSigOpCount(self, fAccurate):
         """Get the SigOp count.
 
         fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
 
         Note that this is consensus-critical.
         """
         n = 0
         lastOpcode = OP_INVALIDOPCODE
         for (opcode, data, sop_idx) in self.raw_iter():
             if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                 n += 1
             elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                 if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                     n += opcode.decode_op_n()
                 else:
                     n += 20
             lastOpcode = opcode
         return n
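
 # A small usage sketch: scripts are built from opcodes, integers and byte
 # pushes, and iterate back element by element:
 #
 #   s = CScript([OP_DUP, OP_HASH160, b'\x00' * 20, OP_EQUALVERIFY, OP_CHECKSIG])
 #   assert list(s) == [OP_DUP, OP_HASH160, b'\x00' * 20,
 #                      OP_EQUALVERIFY, OP_CHECKSIG]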
 
 
 SIGHASH_ALL = 1
 SIGHASH_NONE = 2
 SIGHASH_SINGLE = 3
 SIGHASH_FORKID = 0x40
 SIGHASH_ANYONECANPAY = 0x80
 
 
 def FindAndDelete(script, sig):
     """Consensus critical, see FindAndDelete() in Satoshi codebase"""
     r = b''
     last_sop_idx = sop_idx = 0
     skip = True
     for (opcode, data, sop_idx) in script.raw_iter():
         if not skip:
             r += script[last_sop_idx:sop_idx]
         last_sop_idx = sop_idx
         if script[sop_idx:sop_idx + len(sig)] == sig:
             skip = True
         else:
             skip = False
     if not skip:
         r += script[last_sop_idx:]
     return CScript(r)
 
 
 def SignatureHash(script, txTo, inIdx, hashtype):
     """Consensus-correct SignatureHash
 
     Returns (hash, err) to precisely match the consensus-critical behavior of
     the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
     """
     HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
 
     if inIdx >= len(txTo.vin):
         return (HASH_ONE, "inIdx {} out of range ({})".format(
             inIdx, len(txTo.vin)))
     txtmp = CTransaction(txTo)
 
     for txin in txtmp.vin:
         txin.scriptSig = b''
     txtmp.vin[inIdx].scriptSig = FindAndDelete(
         script, CScript([OP_CODESEPARATOR]))
 
     if (hashtype & 0x1f) == SIGHASH_NONE:
         txtmp.vout = []
 
         for i in range(len(txtmp.vin)):
             if i != inIdx:
                 txtmp.vin[i].nSequence = 0
 
     elif (hashtype & 0x1f) == SIGHASH_SINGLE:
         outIdx = inIdx
         if outIdx >= len(txtmp.vout):
             return (HASH_ONE, "outIdx {} out of range ({})".format(
                 outIdx, len(txtmp.vout)))
 
         tmp = txtmp.vout[outIdx]
         txtmp.vout = []
         for _ in range(outIdx):
             txtmp.vout.append(CTxOut(-1))
         txtmp.vout.append(tmp)
 
         for i in range(len(txtmp.vin)):
             if i != inIdx:
                 txtmp.vin[i].nSequence = 0
 
     if hashtype & SIGHASH_ANYONECANPAY:
         tmp = txtmp.vin[inIdx]
         txtmp.vin = []
         txtmp.vin.append(tmp)
 
     s = txtmp.serialize()
     s += struct.pack(b"<I", hashtype)
 
     hash = hash256(s)
 
     return (hash, None)
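
 # A minimal signing sketch (key and tx objects hypothetical): the digest
 # returned above is what gets signed for legacy, non-FORKID signatures:
 #
 #   sighash, err = SignatureHash(prev_scriptpubkey, tx, 0, SIGHASH_ALL)
 #   assert err is None
 #   sig = privkey.sign_ecdsa(sighash) + bytes([SIGHASH_ALL])
 #   tx.vin[0].scriptSig = CScript([sig, pubkey])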
 
 # TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
 # Performance optimization probably not necessary for python tests, however.
 
 
 def SignatureHashForkId(script, txTo, inIdx, hashtype, amount):
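     """Compute the replay-protected (SIGHASH_FORKID) signature hash.

     The preimage follows the BIP143 layout; amount is the value of the
     output being spent."""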
 
     hashPrevouts = 0
     hashSequence = 0
     hashOutputs = 0
 
     if not (hashtype & SIGHASH_ANYONECANPAY):
         serialize_prevouts = bytes()
         for i in txTo.vin:
             serialize_prevouts += i.prevout.serialize()
         hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
 
     if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f)
             != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
         serialize_sequence = bytes()
         for i in txTo.vin:
             serialize_sequence += struct.pack("<I", i.nSequence)
         hashSequence = uint256_from_str(hash256(serialize_sequence))
 
     if ((hashtype & 0x1f) != SIGHASH_SINGLE and (
             hashtype & 0x1f) != SIGHASH_NONE):
         serialize_outputs = bytes()
         for o in txTo.vout:
             serialize_outputs += o.serialize()
         hashOutputs = uint256_from_str(hash256(serialize_outputs))
     elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
         serialize_outputs = txTo.vout[inIdx].serialize()
         hashOutputs = uint256_from_str(hash256(serialize_outputs))
 
     ss = bytes()
     ss += struct.pack("<i", txTo.nVersion)
     ss += ser_uint256(hashPrevouts)
     ss += ser_uint256(hashSequence)
     ss += txTo.vin[inIdx].prevout.serialize()
     ss += ser_string(script)
     ss += struct.pack("<q", amount)
     ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
     ss += ser_uint256(hashOutputs)
     ss += struct.pack("<i", txTo.nLockTime)
     ss += struct.pack("<I", hashtype)
 
     return hash256(ss)
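
 # Usage sketch (objects hypothetical); forkid signatures always OR the
 # SIGHASH_FORKID flag into the hash type:
 #
 #   sighash = SignatureHashForkId(prev_scriptpubkey, tx, 0,
 #                                 SIGHASH_ALL | SIGHASH_FORKID, amount)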
 
 
 class TestFrameworkScript(unittest.TestCase):
     def test_bn2vch(self):
         self.assertEqual(bn2vch(0), bytes([]))
         self.assertEqual(bn2vch(1), bytes([0x01]))
         self.assertEqual(bn2vch(-1), bytes([0x81]))
         self.assertEqual(bn2vch(0x7F), bytes([0x7F]))
         self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))
         self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))
         self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))
         self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))
         self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))
         self.assertEqual(bn2vch(0x100), bytes([0x00, 0x01]))
         self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))
         self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
         self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
         self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
         self.assertEqual(bn2vch(0x80000000),
                          bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
         self.assertEqual(bn2vch(-0x80000000),
                          bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
         self.assertEqual(bn2vch(0xFFFFFFFF),
                          bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
         self.assertEqual(bn2vch(123456789),
                          bytes([0x15, 0xCD, 0x5B, 0x07]))
         self.assertEqual(bn2vch(-54321),
                          bytes([0x31, 0xD4, 0x80]))
 
     def test_cscriptnum_encoding(self):
         # round-trip negative and multi-byte CScriptNums
         values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1,
                   -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32),
                   1 << 40, 1500, -1500]
         for value in values:
             self.assertEqual(
                 CScriptNum.decode(CScriptNum.encode(CScriptNum(value))),
                 value)
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index bc8c5dbf5..5b2e3454d 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -1,792 +1,792 @@
 #!/usr/bin/env python3
 # Copyright (c) 2014-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Base class for RPC testing."""
 
 import argparse
 import configparser
-from enum import Enum
 import logging
 import os
 import pdb
 import random
 import shutil
 import sys
 import tempfile
 import time
+from enum import Enum
 from typing import Optional
 
+from . import coverage
 from .authproxy import JSONRPCException
 from .avatools import get_proof_ids
-from . import coverage
 from .p2p import NetworkThread
 from .test_node import TestNode
 from .util import (
+    MAX_NODES,
+    PortSeed,
     assert_equal,
     check_json_precision,
     connect_nodes,
     disconnect_nodes,
     get_datadir_path,
     initialize_datadir,
-    MAX_NODES,
     p2p_port,
-    PortSeed,
     rpc_port,
     wait_until_helper,
 )
 
 
 class TestStatus(Enum):
     PASSED = 1
     FAILED = 2
     SKIPPED = 3
 
 
 TEST_EXIT_PASSED = 0
 TEST_EXIT_FAILED = 1
 TEST_EXIT_SKIPPED = 77
 
 # Timestamp is Dec. 1st, 2019 at 00:00:00
 TIMESTAMP_IN_THE_PAST = 1575158400
 
 TMPDIR_PREFIX = "bitcoin_func_test_"
 
 
 class SkipTest(Exception):
     """This exception is raised to skip a test"""
 
     def __init__(self, message):
         self.message = message
 
 
 class BitcoinTestMetaClass(type):
     """Metaclass for BitcoinTestFramework.
 
     Ensures that any attempt to register a subclass of `BitcoinTestFramework`
     adheres to a standard whereby the subclass overrides `set_test_params` and
     `run_test` but DOES NOT override either `__init__` or `main`. If any of
     those standards are violated, a ``TypeError`` is raised."""
 
     def __new__(cls, clsname, bases, dct):
         if not clsname == 'BitcoinTestFramework':
             if not ('run_test' in dct and 'set_test_params' in dct):
                 raise TypeError("BitcoinTestFramework subclasses must override "
                                 "'run_test' and 'set_test_params'")
             if '__init__' in dct or 'main' in dct:
                 raise TypeError("BitcoinTestFramework subclasses may not override "
                                 "'__init__' or 'main'")
 
         return super().__new__(cls, clsname, bases, dct)
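
 # A minimal conforming subclass looks like this (sketch; names hypothetical):
 #
 #   class ExampleTest(BitcoinTestFramework):
 #       def set_test_params(self):
 #           self.num_nodes = 1
 #
 #       def run_test(self):
 #           self.log.info(self.nodes[0].getbestblockhash())
 #
 #   if __name__ == '__main__':
 #       ExampleTest().main()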
 
 
 class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
     """Base class for a bitcoin test script.
 
     Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
 
     Individual tests can also override the following methods to customize the test setup:
 
     - add_options()
     - setup_chain()
     - setup_network()
     - setup_nodes()
 
     The __init__() and main() methods should not be overridden.
 
     This class also contains various public and private helper methods."""
 
     chain: Optional[str] = None
     setup_clean_chain: Optional[bool] = None
 
     def __init__(self):
         """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
         self.chain = 'regtest'
         self.setup_clean_chain = False
         self.nodes = []
         self.network_thread = None
         # Wait for up to 60 seconds for the RPC server to respond
         self.rpc_timeout = 60
         self.supports_cli = True
         self.bind_to_localhost_only = True
         # We run parse_args before set_test_params for tests that need to
         # know the parser options during setup.
         self.parse_args()
         self.set_test_params()
         if self.options.timeout_factor == 0:
             self.options.timeout_factor = 99999
         # optionally, increase timeout by a factor
         self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor)
 
     def main(self):
         """Main function. This should not be overridden by the subclass test scripts."""
         assert hasattr(
             self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
 
         try:
             self.setup()
             self.run_test()
         except JSONRPCException:
             self.log.exception("JSONRPC error")
             self.success = TestStatus.FAILED
         except SkipTest as e:
             self.log.warning("Test Skipped: {}".format(e.message))
             self.success = TestStatus.SKIPPED
         except AssertionError:
             self.log.exception("Assertion failed")
             self.success = TestStatus.FAILED
         except KeyError:
             self.log.exception("Key error")
             self.success = TestStatus.FAILED
         except Exception:
             self.log.exception("Unexpected exception caught during testing")
             self.success = TestStatus.FAILED
         except KeyboardInterrupt:
             self.log.warning("Exiting after keyboard interrupt")
             self.success = TestStatus.FAILED
         finally:
             exit_code = self.shutdown()
             sys.exit(exit_code)
 
     def parse_args(self):
         parser = argparse.ArgumentParser(usage="%(prog)s [options]")
         parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                             help="Leave bitcoinds and test.* datadir on exit or error")
         parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
                             help="Don't stop bitcoinds after the test execution")
         parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                             help="Directory for caching pregenerated datadirs (default: %(default)s)")
         parser.add_argument("--tmpdir", dest="tmpdir",
                             help="Root directory for datadirs")
         parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
                             help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
         parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                             help="Print out all RPC calls as they are made")
         parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
                             help="The seed to use for assigning port numbers (default: current process id)")
         parser.add_argument("--coveragedir", dest="coveragedir",
                             help="Write tested RPC commands into this directory")
         parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath(
             __file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)")
         parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                             help="Attach a python debugger if test fails")
         parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
                             help="use bitcoin-cli instead of RPC for all commands")
         parser.add_argument("--perf", dest="perf", default=False, action="store_true",
                             help="profile running nodes with perf for the duration of the test")
         parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
                             help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
         parser.add_argument("--randomseed", type=int,
                             help="set a random seed for deterministically reproducing a previous test run")
         parser.add_argument("--with-axionactivation", dest="axionactivation", default=False, action="store_true",
                             help="Activate axion update on timestamp {}".format(TIMESTAMP_IN_THE_PAST))
         parser.add_argument(
             '--timeout-factor',
             dest="timeout_factor",
             type=float,
             default=1.0,
             help='adjust test timeouts by a factor. '
                  'Setting it to 0 disables all timeouts')
 
         self.add_options(parser)
         self.options = parser.parse_args()
 
     def setup(self):
         """Call this method to start up the test framework object with options set."""
         PortSeed.n = self.options.port_seed
 
         check_json_precision()
 
         self.options.cachedir = os.path.abspath(self.options.cachedir)
 
         config = configparser.ConfigParser()
         config.read_file(open(self.options.configfile, encoding='utf-8'))
         self.config = config
         fname_bitcoind = os.path.join(
             config["environment"]["BUILDDIR"],
             "src",
             "bitcoind" + config["environment"]["EXEEXT"]
         )
         fname_bitcoincli = os.path.join(
             config["environment"]["BUILDDIR"],
             "src",
             "bitcoin-cli" + config["environment"]["EXEEXT"]
         )
         self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind)
         self.options.bitcoincli = os.getenv(
             "BITCOINCLI", default=fname_bitcoincli)
         self.options.emulator = config["environment"]["EMULATOR"] or None
 
         os.environ['PATH'] = config['environment']['BUILDDIR'] + os.pathsep + \
             config['environment']['BUILDDIR'] + os.path.sep + "qt" + os.pathsep + \
             os.environ['PATH']
 
         # Set up temp directory and start logging
         if self.options.tmpdir:
             self.options.tmpdir = os.path.abspath(self.options.tmpdir)
             os.makedirs(self.options.tmpdir, exist_ok=False)
         else:
             self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
         self._start_logging()
 
         # Seed the PRNG. Note that test runs are reproducible if and only if
         # a single thread accesses the PRNG. For more information, see
         # https://docs.python.org/3/library/random.html#notes-on-reproducibility.
         # The network thread shouldn't access random. If we need to change the
         # network thread to access randomness, it should instantiate its own
         # random.Random object.
         seed = self.options.randomseed
 
         if seed is None:
             seed = random.randrange(sys.maxsize)
         else:
             self.log.debug("User supplied random seed {}".format(seed))
 
         random.seed(seed)
         self.log.debug("PRNG seed is: {}".format(seed))
 
         self.log.debug('Setting up network thread')
         self.network_thread = NetworkThread()
         self.network_thread.start()
 
         if self.options.usecli:
             if not self.supports_cli:
                 raise SkipTest(
                     "--usecli specified but test does not support using CLI")
             self.skip_if_no_cli()
         self.skip_test_if_missing_module()
         self.setup_chain()
         self.setup_network()
 
         self.success = TestStatus.PASSED
 
     def shutdown(self):
         """Call this method to shut down the test framework object."""
         if self.success == TestStatus.FAILED and self.options.pdbonfailure:
             print("Testcase failed. Attaching python debugger. Enter ? for help")
             pdb.set_trace()
 
         self.log.debug('Closing down network thread')
         self.network_thread.close()
         if not self.options.noshutdown:
             self.log.info("Stopping nodes")
             if self.nodes:
                 self.stop_nodes()
         else:
             for node in self.nodes:
                 node.cleanup_on_exit = False
             self.log.info(
                 "Note: bitcoinds were not stopped and may still be running")
 
         should_clean_up = (
             not self.options.nocleanup and
             not self.options.noshutdown and
             self.success != TestStatus.FAILED and
             not self.options.perf
         )
         if should_clean_up:
             self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
             cleanup_tree_on_exit = True
         elif self.options.perf:
             self.log.warning(
                 "Not cleaning up dir {} due to perf data".format(
                     self.options.tmpdir))
             cleanup_tree_on_exit = False
         else:
             self.log.warning(
                 "Not cleaning up dir {}".format(self.options.tmpdir))
             cleanup_tree_on_exit = False
 
         if self.success == TestStatus.PASSED:
             self.log.info("Tests successful")
             exit_code = TEST_EXIT_PASSED
         elif self.success == TestStatus.SKIPPED:
             self.log.info("Test skipped")
             exit_code = TEST_EXIT_SKIPPED
         else:
             self.log.error(
                 "Test failed. Test logging available at {}/test_framework.log".format(self.options.tmpdir))
             self.log.error("")
             self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(
                 os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
             self.log.error("")
             self.log.error(
                 "If this failure happened unexpectedly or intermittently, please"
                 " file a bug and provide a link or upload of the combined log.")
             self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
             self.log.error("")
             exit_code = TEST_EXIT_FAILED
         # Logging.shutdown will not remove stream- and filehandlers, so we must
         # do it explicitly. Handlers are removed so the next test run can apply
         # different log handler settings.
         # See: https://docs.python.org/3/library/logging.html#logging.shutdown
         for h in list(self.log.handlers):
             h.flush()
             h.close()
             self.log.removeHandler(h)
         rpc_logger = logging.getLogger("BitcoinRPC")
         for h in list(rpc_logger.handlers):
             h.flush()
             rpc_logger.removeHandler(h)
         if cleanup_tree_on_exit:
             shutil.rmtree(self.options.tmpdir)
 
         self.nodes.clear()
         return exit_code
 
     # Methods to override in subclass test scripts.
     def set_test_params(self):
         """Tests must this method to change default values for number of nodes, topology, etc"""
         raise NotImplementedError
 
     def add_options(self, parser):
         """Override this method to add command-line options to the test"""
         pass
 
     def skip_test_if_missing_module(self):
         """Override this method to skip a test if a module is not compiled"""
         pass
 
     def setup_chain(self):
         """Override this method to customize blockchain setup"""
         self.log.info("Initializing test directory " + self.options.tmpdir)
         if self.setup_clean_chain:
             self._initialize_chain_clean()
         else:
             self._initialize_chain()
 
     def setup_network(self):
         """Override this method to customize test network topology"""
         self.setup_nodes()
 
         # Connect the nodes as a "chain".  This allows us
         # to split the network between nodes 1 and 2 to get
         # two halves that can work on competing chains.
         #
         # Topology looks like this:
         # node0 <-- node1 <-- node2 <-- node3
         #
         # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
         # ensure block propagation, all nodes will establish outgoing connections toward node0.
         # See fPreferredDownload in net_processing.
         #
         # If further outbound connections are needed, they can be added at the beginning of the test with e.g.
         # self.connect_nodes(1, 2)
         for i in range(self.num_nodes - 1):
             self.connect_nodes(i + 1, i)
         self.sync_all()
 
     def setup_nodes(self):
         """Override this method to customize test node setup"""
         extra_args = None
         if hasattr(self, "extra_args"):
             extra_args = self.extra_args
         self.add_nodes(self.num_nodes, extra_args)
         self.start_nodes()
         self.import_deterministic_coinbase_privkeys()
         if not self.setup_clean_chain:
             for n in self.nodes:
                 assert_equal(n.getblockchaininfo()["blocks"], 199)
             # To ensure that all nodes are out of IBD, the most recent block
             # must have a timestamp not too old (see IsInitialBlockDownload()).
             self.log.debug('Generate a block with current time')
             block_hash = self.nodes[0].generate(1)[0]
             block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
             for n in self.nodes:
                 n.submitblock(block)
                 chain_info = n.getblockchaininfo()
                 assert_equal(chain_info["blocks"], 200)
                 assert_equal(chain_info["initialblockdownload"], False)
 
     def import_deterministic_coinbase_privkeys(self):
         for n in self.nodes:
             try:
                 n.getwalletinfo()
             except JSONRPCException as e:
                 assert str(e).startswith('Method not found')
                 continue
 
             n.importprivkey(
                 privkey=n.get_deterministic_priv_key().key,
                 label='coinbase')
 
     def run_test(self):
         """Tests must override this method to define test logic"""
         raise NotImplementedError
 
     # Public helper methods. These can be accessed by the subclass test
     # scripts.
 
     def add_nodes(self, num_nodes: int, extra_args=None,
                   *, host=None, binary=None):
         """Instantiate TestNode objects.
 
         Should only be called once after the nodes have been specified in
         set_test_params()."""
         if self.bind_to_localhost_only:
             extra_confs = [["bind=127.0.0.1"]] * num_nodes
         else:
             extra_confs = [[]] * num_nodes
         if extra_args is None:
             extra_args = [[]] * num_nodes
         if binary is None:
             binary = [self.options.bitcoind] * num_nodes
         assert_equal(len(extra_confs), num_nodes)
         assert_equal(len(extra_args), num_nodes)
         assert_equal(len(binary), num_nodes)
         for i in range(num_nodes):
             self.nodes.append(TestNode(
                 i,
                 get_datadir_path(self.options.tmpdir, i),
                 chain=self.chain,
                 host=host,
                 rpc_port=rpc_port(i),
                 p2p_port=p2p_port(i),
                 timewait=self.rpc_timeout,
                 timeout_factor=self.options.timeout_factor,
                 bitcoind=binary[i],
                 bitcoin_cli=self.options.bitcoincli,
                 coverage_dir=self.options.coveragedir,
                 cwd=self.options.tmpdir,
                 extra_conf=extra_confs[i],
                 extra_args=extra_args[i],
                 use_cli=self.options.usecli,
                 emulator=self.options.emulator,
                 start_perf=self.options.perf,
                 use_valgrind=self.options.valgrind,
             ))
             if self.options.axionactivation:
                 self.nodes[i].extend_default_args(
                     ["-axionactivationtime={}".format(TIMESTAMP_IN_THE_PAST)])
 
     def start_node(self, i, *args, **kwargs):
         """Start a bitcoind"""
 
         node = self.nodes[i]
 
         node.start(*args, **kwargs)
         node.wait_for_rpc_connection()
 
         if self.options.coveragedir is not None:
             coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
 
     def start_nodes(self, extra_args=None, *args, **kwargs):
         """Start multiple bitcoinds"""
 
         if extra_args is None:
             extra_args = [None] * self.num_nodes
         assert_equal(len(extra_args), self.num_nodes)
         try:
             for i, node in enumerate(self.nodes):
                 node.start(extra_args[i], *args, **kwargs)
             for node in self.nodes:
                 node.wait_for_rpc_connection()
         except BaseException:
             # If one node failed to start, stop the others
             self.stop_nodes()
             raise
 
         if self.options.coveragedir is not None:
             for node in self.nodes:
                 coverage.write_all_rpc_commands(
                     self.options.coveragedir, node.rpc)
 
     def stop_node(self, i, expected_stderr='', wait=0):
         """Stop a bitcoind test node"""
         self.nodes[i].stop_node(expected_stderr, wait=wait)
 
     def stop_nodes(self, wait=0):
         """Stop multiple bitcoind test nodes"""
         for node in self.nodes:
             # Issue RPC to stop nodes
             node.stop_node(wait=wait, wait_until_stopped=False)
 
         for node in self.nodes:
             # Wait for nodes to stop
             node.wait_until_stopped()
 
     def restart_node(self, i, extra_args=None):
         """Stop and start a test node"""
         self.stop_node(i)
         self.start_node(i, extra_args)
 
     def wait_for_node_exit(self, i, timeout):
         self.nodes[i].process.wait(timeout)
 
     def connect_nodes(self, a, b):
         connect_nodes(self.nodes[a], self.nodes[b])
 
     def disconnect_nodes(self, a, b):
         disconnect_nodes(self.nodes[a], self.nodes[b])
 
     def split_network(self):
         """
         Split the network of four nodes into nodes 0/1 and 2/3.
         """
         self.disconnect_nodes(1, 2)
         self.sync_all(self.nodes[:2])
         self.sync_all(self.nodes[2:])
 
     def join_network(self):
         """
         Join the (previously split) network halves together.
         """
         self.connect_nodes(1, 2)
         self.sync_all()
 
     def sync_blocks(self, nodes=None, wait=1, timeout=60):
         """
         Wait until everybody has the same tip.
         sync_blocks needs to be called with an rpc_connections set that has at
         least one node already synced to the latest, stable tip, otherwise
         there's a chance it might return before all nodes are stably synced.
         """
         rpc_connections = nodes or self.nodes
         timeout = int(timeout * self.options.timeout_factor)
         stop_time = time.time() + timeout
         while time.time() <= stop_time:
             best_hash = [x.getbestblockhash() for x in rpc_connections]
             if best_hash.count(best_hash[0]) == len(rpc_connections):
                 return
             # Check that each peer has at least one connection
             assert all(len(x.getpeerinfo()) for x in rpc_connections)
             time.sleep(wait)
         raise AssertionError("Block sync timed out after {}s:{}".format(
             timeout,
             "".join("\n  {!r}".format(b) for b in best_hash),
         ))
 
     def sync_mempools(self, nodes=None, wait=1, timeout=60,
                       flush_scheduler=True):
         """
         Wait until everybody has the same transactions in their memory
         pools
         """
         rpc_connections = nodes or self.nodes
         timeout = int(timeout * self.options.timeout_factor)
         stop_time = time.time() + timeout
         while time.time() <= stop_time:
             pool = [set(r.getrawmempool()) for r in rpc_connections]
             if pool.count(pool[0]) == len(rpc_connections):
                 if flush_scheduler:
                     for r in rpc_connections:
                         r.syncwithvalidationinterfacequeue()
                 return
             # Check that each peer has at least one connection
             assert all(len(x.getpeerinfo()) for x in rpc_connections)
             time.sleep(wait)
         raise AssertionError("Mempool sync timed out after {}s:{}".format(
             timeout,
             "".join("\n  {!r}".format(m) for m in pool),
         ))
 
     def sync_proofs(self, nodes=None, wait=1, timeout=60):
         """
         Wait until everybody has the same proofs in their proof pools
         """
         rpc_connections = nodes or self.nodes
         timeout = int(timeout * self.options.timeout_factor)
         stop_time = time.time() + timeout
 
         def format_ids(id_list):
             """Convert ProodIDs to hex strings for easier debugging"""
             return list(f"{i:064x}" for i in id_list)
 
         while time.time() <= stop_time:
             nodes_proofs = [
                 set(format_ids(get_proof_ids(r))) for r in rpc_connections]
             if nodes_proofs.count(nodes_proofs[0]) == len(rpc_connections):
                 return
             # Check that each peer has at least one connection
             assert all(len(x.getpeerinfo()) for x in rpc_connections)
             time.sleep(wait)
         raise AssertionError("Proofs sync timed out after {}s:{}".format(
             timeout,
             "".join("\n  {!r}".format(m) for m in nodes_proofs),
         ))
 
     def sync_all(self, nodes=None):
         self.sync_blocks(nodes)
         self.sync_mempools(nodes)
 
     def wait_until(self, test_function, timeout=60):
         return wait_until_helper(test_function, timeout=timeout,
                                  timeout_factor=self.options.timeout_factor)
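
     # Example (hypothetical condition): poll an RPC until it is true; the
     # timeout is scaled by --timeout-factor automatically:
     #   self.wait_until(lambda: txid in self.nodes[0].getrawmempool())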
 
     # Private helper methods. These should not be accessed by the subclass
     # test scripts.
 
     def _start_logging(self):
         # Add logger and logging handlers
         self.log = logging.getLogger('TestFramework')
         self.log.setLevel(logging.DEBUG)
         # Create file handler to log all messages
         fh = logging.FileHandler(
             self.options.tmpdir + '/test_framework.log', encoding='utf-8')
         fh.setLevel(logging.DEBUG)
         # Create console handler to log messages to stdout. By default this
         # logs only error messages, but can be configured with --loglevel.
         ch = logging.StreamHandler(sys.stdout)
         # The user can provide the log level as a number or string (e.g.
         # DEBUG). loglevel was parsed as a string, so try to convert it to an
         # int first.
         ll = (int(self.options.loglevel) if self.options.loglevel.isdigit()
               else self.options.loglevel.upper())
         ch.setLevel(ll)
         # Format logs the same as bitcoind's debug.log with microsecond
         # precision (so log files can be concatenated and sorted)
         formatter = logging.Formatter(
             fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
         formatter.converter = time.gmtime
         fh.setFormatter(formatter)
         ch.setFormatter(formatter)
         # add the handlers to the logger
         self.log.addHandler(fh)
         self.log.addHandler(ch)
 
         if self.options.trace_rpc:
             rpc_logger = logging.getLogger("BitcoinRPC")
             rpc_logger.setLevel(logging.DEBUG)
             rpc_handler = logging.StreamHandler(sys.stdout)
             rpc_handler.setLevel(logging.DEBUG)
             rpc_logger.addHandler(rpc_handler)
 
     def _initialize_chain(self):
         """Initialize a pre-mined blockchain for use by the test.
 
         Create a cache of a 199-block-long chain.
         Afterward, create num_nodes copies from the cache."""
 
         # Use node 0 to create the cache for all other nodes
         CACHE_NODE_ID = 0
         cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
         assert self.num_nodes <= MAX_NODES
 
         if not os.path.isdir(cache_node_dir):
             self.log.debug(
                 "Creating cache directory {}".format(cache_node_dir))
 
             initialize_datadir(
                 self.options.cachedir,
                 CACHE_NODE_ID,
                 self.chain)
             self.nodes.append(
                 TestNode(
                     CACHE_NODE_ID,
                     cache_node_dir,
                     chain=self.chain,
                     extra_conf=["bind=127.0.0.1"],
                     extra_args=['-disablewallet'],
                     host=None,
                     rpc_port=rpc_port(CACHE_NODE_ID),
                     p2p_port=p2p_port(CACHE_NODE_ID),
                     timewait=self.rpc_timeout,
                     timeout_factor=self.options.timeout_factor,
                     bitcoind=self.options.bitcoind,
                     bitcoin_cli=self.options.bitcoincli,
                     coverage_dir=None,
                     cwd=self.options.tmpdir,
                     emulator=self.options.emulator,
                 ))
 
             if self.options.axionactivation:
                 self.nodes[CACHE_NODE_ID].extend_default_args(
                     ["-axionactivationtime={}".format(TIMESTAMP_IN_THE_PAST)])
 
             self.start_node(CACHE_NODE_ID)
             cache_node = self.nodes[CACHE_NODE_ID]
 
             # Wait for RPC connections to be ready
             cache_node.wait_for_rpc_connection()
 
             # Set a time in the past, so that blocks don't end up in the future
             cache_node.setmocktime(
                 cache_node.getblockheader(
                     cache_node.getbestblockhash())['time'])
 
             # Create a 199-block-long chain; each of the first 4 nodes
             # gets 25 mature blocks and 25 immature ones.
             # The 4th node gets only 24 immature blocks so that the very last
             # block in the cache is not too old (no stale tip age).
             # This is needed so that we are out of IBD when the test starts,
             # see the tip age check in IsInitialBlockDownload().
             for i in range(8):
                 cache_node.generatetoaddress(
                     nblocks=25 if i != 7 else 24,
                     address=TestNode.PRIV_KEYS[i % 4].address,
                 )
 
             assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
 
             # Shut it down, and clean up cache directories:
             self.stop_nodes()
             self.nodes = []
 
             def cache_path(*paths):
                 return os.path.join(cache_node_dir, self.chain, *paths)
 
             # Remove empty wallets dir
             os.rmdir(cache_path('wallets'))
             for entry in os.listdir(cache_path()):
                 # Only keep chainstate and blocks folder
                 if entry not in ['chainstate', 'blocks']:
                     os.remove(cache_path(entry))
 
         for i in range(self.num_nodes):
             self.log.debug(
                 "Copy cache directory {} to node {}".format(
                     cache_node_dir, i))
             to_dir = get_datadir_path(self.options.tmpdir, i)
             shutil.copytree(cache_node_dir, to_dir)
             # Overwrite port/rpcport in bitcoin.conf
             initialize_datadir(self.options.tmpdir, i, self.chain)
 
     def _initialize_chain_clean(self):
         """Initialize empty blockchain for use by the test.
 
         Create an empty blockchain and num_nodes wallets.
         Useful if a test case wants complete control over initialization."""
         for i in range(self.num_nodes):
             initialize_datadir(self.options.tmpdir, i, self.chain)
 
     def skip_if_no_py3_zmq(self):
         """Attempt to import the zmq package and skip the test if the import fails."""
         try:
             import zmq  # noqa
         except ImportError:
             raise SkipTest("python3-zmq module not available.")
 
     def skip_if_no_bitcoind_zmq(self):
         """Skip the running test if bitcoind has not been compiled with zmq support."""
         if not self.is_zmq_compiled():
             raise SkipTest("bitcoind has not been built with zmq enabled.")
 
     def skip_if_no_wallet(self):
         """Skip the running test if wallet has not been compiled."""
         if not self.is_wallet_compiled():
             raise SkipTest("wallet has not been compiled.")
 
     def skip_if_no_wallet_tool(self):
         """Skip the running test if bitcoin-wallet has not been compiled."""
         if not self.is_wallet_tool_compiled():
             raise SkipTest("bitcoin-wallet has not been compiled")
 
     def skip_if_no_cli(self):
         """Skip the running test if bitcoin-cli has not been compiled."""
         if not self.is_cli_compiled():
             raise SkipTest("bitcoin-cli has not been compiled.")
 
     def is_cli_compiled(self):
         """Checks whether bitcoin-cli was compiled."""
         return self.config["components"].getboolean("ENABLE_CLI")
 
     def is_wallet_compiled(self):
         """Checks whether the wallet module was compiled."""
         return self.config["components"].getboolean("ENABLE_WALLET")
 
     def is_wallet_tool_compiled(self):
         """Checks whether bitcoin-wallet was compiled."""
         return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
 
     def is_zmq_compiled(self):
         """Checks whether the zmq module was compiled."""
         return self.config["components"].getboolean("ENABLE_ZMQ")
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index c0bd52efd..2f3e959ab 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -1,910 +1,910 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Class for bitcoind node under test"""
 
+import collections
 import contextlib
 import decimal
-from enum import Enum
 import errno
 import http.client
 import json
 import logging
 import os
 import re
+import shlex
 import subprocess
 import sys
 import tempfile
 import time
 import urllib.parse
-import collections
-import shlex
+from enum import Enum
 
 from .authproxy import JSONRPCException
 from .descriptors import descsum_create
-from .messages import XEC, CTransaction, FromHex, MY_SUBVERSION
+from .messages import MY_SUBVERSION, XEC, CTransaction, FromHex
 from .util import (
     MAX_NODES,
+    EncodeDecimal,
     append_config,
     delete_cookie_file,
     get_auth_cookie,
     get_rpc_proxy,
     p2p_port,
     rpc_url,
     wait_until_helper,
-    EncodeDecimal,
 )
 
 BITCOIND_PROC_WAIT_TIMEOUT = 60
 
 
 class FailedToStartError(Exception):
     """Raised when a node fails to start correctly."""
 
 
 class ErrorMatch(Enum):
     FULL_TEXT = 1
     FULL_REGEX = 2
     PARTIAL_REGEX = 3
 
 
 class TestNode():
     """A class for representing a bitcoind node under test.
 
     This class contains:
 
     - state about the node (whether it's running, etc)
     - a Python subprocess.Popen object representing the running process
     - an RPC connection to the node
     - one or more P2P connections to the node
 
     To make things easier for the test writer, any unrecognised messages will
     be dispatched to the RPC connection."""
 
     def __init__(self, i, datadir, *, chain, host, rpc_port, p2p_port, timewait, timeout_factor, bitcoind, bitcoin_cli,
                  coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, emulator=None, start_perf=False, use_valgrind=False):
         """
         Kwargs:
             start_perf (bool): If True, begin profiling the node with `perf` as soon as
                 the node starts.
         """
 
         self.index = i
         self.datadir = datadir
         self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
         self.stdout_dir = os.path.join(self.datadir, "stdout")
         self.stderr_dir = os.path.join(self.datadir, "stderr")
         self.chain = chain
         self.host = host
         self.rpc_port = rpc_port
         self.p2p_port = p2p_port
         self.name = "testnode-{}".format(i)
         self.rpc_timeout = timewait
         self.binary = bitcoind
         if not os.path.isfile(self.binary):
             raise FileNotFoundError(
                 "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOIND=<path/to/bitcoind> {}".format(self.binary, sys.argv[0]))
         self.coverage_dir = coverage_dir
         self.cwd = cwd
         if extra_conf is not None:
             append_config(datadir, extra_conf)
         # Most callers will just need to add extra args to the default list
         # below.
         # For those callers that need more flexibility, they can access the
         # default args using the provided facilities.
         # Note that common args are set in the config file (see
         # initialize_datadir)
         self.extra_args = extra_args
         # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
         # This means that starting a bitcoind using the temp dir to debug a failed test won't
         # spam debug.log.
         self.default_args = [
             "-datadir=" + self.datadir,
             "-logtimemicros",
             "-logthreadnames",
             "-debug",
             "-debugexclude=libevent",
             "-debugexclude=leveldb",
             "-uacomment=" + self.name,
             "-noprinttoconsole",
         ]
 
         if use_valgrind:
             default_suppressions_file = os.path.join(
                 os.path.dirname(os.path.realpath(__file__)),
                 "..", "..", "..", "contrib", "valgrind.supp")
             suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                           default_suppressions_file)
             self.binary = "valgrind"
             self.bitcoind_args = [bitcoind] + self.default_args
             self.default_args = ["--suppressions={}".format(suppressions_file),
                                  "--gen-suppressions=all", "--exit-on-first-error=yes",
                                  "--error-exitcode=1", "--quiet"] + self.bitcoind_args
 
         if emulator is not None:
             if not os.path.isfile(emulator):
                 raise FileNotFoundError(
                     "Emulator '{}' could not be found.".format(emulator))
         self.emulator = emulator
 
         if use_cli and not os.path.isfile(bitcoin_cli):
             raise FileNotFoundError(
                 "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOINCLI=<path/to/bitcoin-cli> {}".format(bitcoin_cli, sys.argv[0]))
         self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator)
         self.use_cli = use_cli
         self.start_perf = start_perf
 
         self.running = False
         self.process = None
         self.rpc_connected = False
         self.rpc = None
         self.url = None
         self.relay_fee_cache = None
         self.log = logging.getLogger('TestFramework.node{}'.format(i))
         # Whether to kill the node when this object goes away
         self.cleanup_on_exit = True
         # Cache perf subprocesses here by their data output filename.
         self.perf_subprocesses = {}
         self.p2ps = []
         self.timeout_factor = timeout_factor
 
     AddressKeyPair = collections.namedtuple(
         'AddressKeyPair', ['address', 'key'])
     PRIV_KEYS = [
         # address , privkey
         AddressKeyPair(
             'mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z',
             'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
         AddressKeyPair(
             'msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg',
             'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
         AddressKeyPair(
             'mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP',
             'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
         AddressKeyPair(
             'mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR',
             'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
         AddressKeyPair(
             'msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws',
             'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
         AddressKeyPair(
             'n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi',
             'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
         AddressKeyPair(
             'myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6',
             'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
         AddressKeyPair(
             'mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8',
             'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
         AddressKeyPair(
             'mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg',
             'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
         AddressKeyPair(
             'mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf',
             'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
         AddressKeyPair(
             'mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6',
             'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
         AddressKeyPair(
             'mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7',
             'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
     ]
 
     def get_deterministic_priv_key(self):
         """Return a deterministic priv key in base58, that only depends on the node's index"""
         assert len(self.PRIV_KEYS) == MAX_NODES
         return self.PRIV_KEYS[self.index]
 
     def _node_msg(self, msg: str) -> str:
         """Return a modified msg that identifies this node by its index as a debugging aid."""
         return "[node {}] {}".format(self.index, msg)
 
     def _raise_assertion_error(self, msg: str):
         """Raise an AssertionError with msg modified to identify this node."""
         raise AssertionError(self._node_msg(msg))
 
     def __del__(self):
         # Ensure that we don't leave any bitcoind processes lying around after
         # the test ends
         if self.process and self.cleanup_on_exit:
             # Should only happen on test failure
             # Avoid using logger, as that may have already been shutdown when
             # this destructor is called.
             print(self._node_msg("Cleaning up leftover process"))
             self.process.kill()
 
     def __getattr__(self, name):
         """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
         if self.use_cli:
             return getattr(RPCOverloadWrapper(self.cli, True), name)
         else:
             assert self.rpc is not None, self._node_msg(
                 "Error: RPC not initialized")
             assert self.rpc_connected, self._node_msg(
                 "Error: No RPC connection")
             return getattr(RPCOverloadWrapper(self.rpc), name)
 
     def clear_default_args(self):
         self.default_args.clear()
 
     def extend_default_args(self, args):
         self.default_args.extend(args)
 
     def remove_default_args(self, args):
         for rm_arg in args:
             # Remove all occurrences of rm_arg in self.default_args:
             #  - if the arg is a flag (-flag), then the names must match
             #  - if the arg is a value (-key=value) then the name must start
             #    with "-key=" (the '=' char is there to avoid removing a
             #    "-key_suffix" arg when "-key" is the argument to remove).
             self.default_args = [def_arg for def_arg in self.default_args
                                  if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')]
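         # Illustrative behavior (hypothetical args): with default_args of
         # ["-debug", "-datadir=/tmp/x"], remove_default_args(["-datadir"])
         # drops "-datadir=/tmp/x" via the "-datadir=" prefix match, and
         # remove_default_args(["-debug"]) drops the exact "-debug" flag.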
 
     def start(self, extra_args=None, *, cwd=None, stdout=None,
               stderr=None, **kwargs):
         """Start the node."""
         if extra_args is None:
             extra_args = self.extra_args
 
         # Add a new stdout and stderr file each time bitcoind is started
         if stderr is None:
             stderr = tempfile.NamedTemporaryFile(
                 dir=self.stderr_dir, delete=False)
         if stdout is None:
             stdout = tempfile.NamedTemporaryFile(
                 dir=self.stdout_dir, delete=False)
         self.stderr = stderr
         self.stdout = stdout
 
         if cwd is None:
             cwd = self.cwd
 
         # Delete any existing cookie file -- if such a file exists (eg due to
         # unclean shutdown), it will get overwritten anyway by bitcoind, and
         # potentially interfere with our attempt to authenticate
         delete_cookie_file(self.datadir, self.chain)
 
         # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are
         # written to stderr and not the terminal
         subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
 
         p_args = [self.binary] + self.default_args + extra_args
         if self.emulator is not None:
             p_args = [self.emulator] + p_args
         self.process = subprocess.Popen(
             p_args,
             env=subp_env,
             stdout=stdout,
             stderr=stderr,
             cwd=cwd,
             **kwargs)
 
         self.running = True
         self.log.debug("bitcoind started, waiting for RPC to come up")
 
         if self.start_perf:
             self._start_perf()
 
     def wait_for_rpc_connection(self):
         """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
         # Poll at a rate of four times per second
         poll_per_s = 4
         for _ in range(poll_per_s * self.rpc_timeout):
             if self.process.poll() is not None:
                 raise FailedToStartError(self._node_msg(
                     'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
             try:
                 rpc = get_rpc_proxy(
                     rpc_url(
                         self.datadir,
                         self.chain,
                         self.host,
                         self.rpc_port),
                     self.index,
                     # Shorter timeout to allow for one retry in case of
                     # ETIMEDOUT
                     timeout=self.rpc_timeout // 2,
                     coveragedir=self.coverage_dir
                 )
                 rpc.getblockcount()
                 # If the call to getblockcount() succeeds then the RPC
                 # connection is up
                 wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'],
                                   timeout_factor=self.timeout_factor)
                 # Wait for the node to finish reindex, block import, and
                 # loading the mempool. Usually importing happens fast or
                 # even "immediate" when the node is started. However, there
                 # is no guarantee and sometimes ThreadImport might finish
                 # later. This is going to cause intermittent test failures,
                 # because generally the tests assume the node is fully
                 # ready after being started.
                 #
                 # For example, the node will reject block messages from p2p
                 # when it is still importing with the error "Unexpected
                 # block message received"
                 #
                 # The wait is done here to make tests as robust as possible
                 # and prevent racy tests and intermittent failures as much
                 # as possible. Some tests might not need this, but the
                 # overhead is trivial, and the added guarantees are worth
                 # the minimal performance cost.
 
                 self.log.debug("RPC successfully started")
                 if self.use_cli:
                     return
                 self.rpc = rpc
                 self.rpc_connected = True
                 self.url = self.rpc.url
                 return
             except JSONRPCException as e:  # Initialization phase
                 # -28 RPC in warmup
                 # -342 Service unavailable, RPC server started but is shutting down due to error
                 if e.error['code'] != -28 and e.error['code'] != -342:
                     raise  # unknown JSON RPC exception
             except ConnectionResetError:
                 # This might happen when the RPC server is in warmup but shuts down before the call to getblockcount
                 # succeeds. Try again to properly raise the FailedToStartError
                 pass
             except OSError as e:
                 if e.errno == errno.ETIMEDOUT:
                     # Treat identical to ConnectionResetError
                     pass
                 elif e.errno == errno.ECONNREFUSED:
                     # Port not yet open?
                     pass
                 else:
                     # unknown OS error
                     raise
             except ValueError as e:
                 # cookie file not found and no rpcuser or rpcpassword;
                 # bitcoind is still starting
                 if "No RPC credentials" not in str(e):
                     raise
             time.sleep(1.0 / poll_per_s)
         self._raise_assertion_error(
             "Unable to connect to bitcoind after {}s".format(
                 self.rpc_timeout))
 
     def wait_for_cookie_credentials(self):
         """Ensures auth cookie credentials can be read, e.g. for testing CLI
         with -rpcwait before RPC connection is up."""
         self.log.debug("Waiting for cookie credentials")
         # Poll at a rate of four times per second.
         poll_per_s = 4
         for _ in range(poll_per_s * self.rpc_timeout):
             try:
                 get_auth_cookie(self.datadir, self.chain)
                 self.log.debug("Cookie credentials successfully retrieved")
                 return
             except ValueError:
                 # cookie file not found and no rpcuser or rpcpassword;
                 # bitcoind is still starting so we continue polling until
                 # RPC credentials are retrieved
                 pass
             time.sleep(1.0 / poll_per_s)
         self._raise_assertion_error(
             "Unable to retrieve cookie credentials after {}s".format(
                 self.rpc_timeout))
 
     def generate(self, nblocks, maxtries=1000000):
         self.log.debug(
             "TestNode.generate() dispatches `generate` call to `generatetoaddress`")
         return self.generatetoaddress(
             nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
 
     def get_wallet_rpc(self, wallet_name):
         if self.use_cli:
             return RPCOverloadWrapper(
                 self.cli("-rpcwallet={}".format(wallet_name)), True)
         else:
             assert self.rpc is not None, self._node_msg(
                 "Error: RPC not initialized")
             assert self.rpc_connected, self._node_msg(
                 "Error: RPC not connected")
             wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
             return RPCOverloadWrapper(self.rpc / wallet_path)
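 
     # Illustrative usage (hypothetical wallet name):
     #     w = node.get_wallet_rpc("w1")
     #     w.getbalance()  # dispatched to the wallet/w1 RPC endpoint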
 
     def stop_node(self, expected_stderr='', *, wait=0,
                   wait_until_stopped=True):
         """Stop the node."""
         if not self.running:
             return
         self.log.debug("Stopping node")
         try:
             self.stop(wait=wait)
         except http.client.CannotSendRequest:
             self.log.exception("Unable to stop node.")
 
         # If there are any running perf processes, stop them.
         for profile_name in tuple(self.perf_subprocesses.keys()):
             self._stop_perf(profile_name)
 
         # Check that stderr is as expected
         self.stderr.seek(0)
         stderr = self.stderr.read().decode('utf-8').strip()
         if stderr != expected_stderr:
             raise AssertionError(
                 "Unexpected stderr {} != {}".format(stderr, expected_stderr))
 
         self.stdout.close()
         self.stderr.close()
 
         del self.p2ps[:]
 
         if wait_until_stopped:
             self.wait_until_stopped()
 
     def is_node_stopped(self):
         """Checks whether the node has stopped.
 
         Returns True if the node has stopped. False otherwise.
         This method is responsible for freeing resources (self.process)."""
         if not self.running:
             return True
         return_code = self.process.poll()
         if return_code is None:
             return False
 
         # process has stopped. Assert that it didn't return an error code.
         assert return_code == 0, self._node_msg(
             "Node returned non-zero exit code ({}) when stopping".format(return_code))
         self.running = False
         self.process = None
         self.rpc_connected = False
         self.rpc = None
         self.log.debug("Node stopped")
         return True
 
     def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
         wait_until_helper(
             self.is_node_stopped,
             timeout=timeout,
             timeout_factor=self.timeout_factor)
 
     @contextlib.contextmanager
     def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
         """Assert that some debug messages are present within some timeout.
         Unexpected debug messages may be optionally provided to fail a test
         if they appear before expected messages.
 
         Note: expected_msgs must always be non-empty even if the goal is to check
         for unexpected_msgs. This provides a bounded scenario such that "we expect
         to reach some target resulting in expected_msgs without seeing unexpected_msgs".
         Otherwise, we are testing that something never happens, which is fundamentally
         not robust test logic.
         """
         if not expected_msgs:
             raise AssertionError("Expected debug messages is empty")
         if unexpected_msgs is None:
             unexpected_msgs = []
         time_end = time.time() + timeout * self.timeout_factor
         debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
         with open(debug_log, encoding='utf-8') as dl:
             dl.seek(0, 2)
             prev_size = dl.tell()
 
         yield
 
         while True:
             found = True
             with open(debug_log, encoding='utf-8') as dl:
                 dl.seek(prev_size)
                 log = dl.read()
             print_log = " - " + "\n - ".join(log.splitlines())
             for unexpected_msg in unexpected_msgs:
                 if re.search(re.escape(unexpected_msg),
                              log, flags=re.MULTILINE):
                     self._raise_assertion_error(
                         'Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(
                             unexpected_msg, print_log))
             for expected_msg in expected_msgs:
                 if re.search(re.escape(expected_msg), log,
                              flags=re.MULTILINE) is None:
                     found = False
             if found:
                 return
             if time.time() >= time_end:
                 break
             time.sleep(0.05)
         self._raise_assertion_error(
             'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
                 str(expected_msgs), print_log))
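 
     # Illustrative usage (the message and the triggering action are
     # hypothetical):
     #     with node.assert_debug_log(expected_msgs=["Loaded best chain"]):
     #         node.some_action()  # whatever should produce the message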
 
     @contextlib.contextmanager
     def profile_with_perf(self, profile_name):
         """
         Context manager that allows easy profiling of node activity using `perf`.
 
         See `test/functional/README.md` for details on perf usage.
 
         Args:
             profile_name (str): This string will be appended to the
                 profile data filename generated by perf.
         """
         subp = self._start_perf(profile_name)
 
         yield
 
         if subp:
             self._stop_perf(profile_name)
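 
     # Illustrative usage (hypothetical profile name and transaction):
     #     with node.profile_with_perf("send-txs"):
     #         node.sendrawtransaction(rawtx)
     # The samples land in a "<profile_name>.perf.data.*" file under the
     # node's datadir (see _start_perf below).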
 
     def _start_perf(self, profile_name=None):
         """Start a perf process to profile this node.
 
         Returns the subprocess running perf."""
         subp = None
 
         def test_success(cmd):
             return subprocess.call(
                 # shell=True required for pipe use below
                 cmd, shell=True,
                 stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
 
         if not sys.platform.startswith('linux'):
             self.log.warning(
                 "Can't profile with perf; only availabe on Linux platforms")
             return None
 
         if not test_success('which perf'):
             self.log.warning(
                 "Can't profile with perf; must install perf-tools")
             return None
 
         if not test_success(
                 'readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
             self.log.warning(
                 "perf output won't be very useful without debug symbols compiled into bitcoind")
 
         output_path = tempfile.NamedTemporaryFile(
             dir=self.datadir,
             prefix="{}.perf.data.".format(profile_name or 'test'),
             delete=False,
         ).name
 
         cmd = [
             'perf', 'record',
             '-g',                     # Record the callgraph.
             # Compatibility for gcc's -fomit-frame-pointer.
             '--call-graph', 'dwarf',
             '-F', '101',              # Sampling frequency in Hz.
             '-p', str(self.process.pid),
             '-o', output_path,
         ]
         subp = subprocess.Popen(
             cmd,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE)
         self.perf_subprocesses[profile_name] = subp
 
         return subp
 
     def _stop_perf(self, profile_name):
         """Stop (and pop) a perf subprocess."""
         subp = self.perf_subprocesses.pop(profile_name)
         output_path = subp.args[subp.args.index('-o') + 1]
 
         subp.terminate()
         subp.wait(timeout=10)
 
         stderr = subp.stderr.read().decode()
         if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
             self.log.warning(
                 "perf couldn't collect data! Try "
                 "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
         else:
             report_cmd = "perf report -i {}".format(output_path)
             self.log.info("See perf output by running '{}'".format(report_cmd))
 
     def assert_start_raises_init_error(
             self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
         """Attempt to start the node and expect it to raise an error.
 
         extra_args: extra arguments to pass through to bitcoind
         expected_msg: regex that stderr should match when bitcoind fails
 
         Will throw if bitcoind starts without an error.
         Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
         with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
                 tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
             try:
                 self.start(extra_args, stdout=log_stdout,
                            stderr=log_stderr, *args, **kwargs)
                 self.wait_for_rpc_connection()
                 self.stop_node()
                 self.wait_until_stopped()
             except FailedToStartError as e:
                 self.log.debug('bitcoind failed to start: {}'.format(e))
                 self.running = False
                 self.process = None
                 # Check stderr for expected message
                 if expected_msg is not None:
                     log_stderr.seek(0)
                     stderr = log_stderr.read().decode('utf-8').strip()
                     if match == ErrorMatch.PARTIAL_REGEX:
                         if re.search(expected_msg, stderr,
                                      flags=re.MULTILINE) is None:
                             self._raise_assertion_error(
                                 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                     elif match == ErrorMatch.FULL_REGEX:
                         if re.fullmatch(expected_msg, stderr) is None:
                             self._raise_assertion_error(
                                 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                     elif match == ErrorMatch.FULL_TEXT:
                         if expected_msg != stderr:
                             self._raise_assertion_error(
                                 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
             else:
                 if expected_msg is None:
                     assert_msg = "bitcoind should have exited with an error"
                 else:
                     assert_msg = "bitcoind should have exited with expected error " + expected_msg
                 self._raise_assertion_error(assert_msg)
 
     def relay_fee(self, cached=True):
         if not self.relay_fee_cache or not cached:
             self.relay_fee_cache = self.getnetworkinfo()["relayfee"]
 
         return self.relay_fee_cache
 
     def calculate_fee(self, tx):
         """ Estimate the necessary fees (in sats) for an unsigned CTransaction assuming:
         - the current relayfee on node
         - all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr
         - all inputs currently unsigned (empty scriptSig)
         """
         billable_size_estimate = tx.billable_size()
         # Add some padding for signatures / public keys
         # 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes)
         billable_size_estimate += len(tx.vin) * 107
 
         # relay_fee gives a value in XEC per kB.
         return int(self.relay_fee() / 1000 * billable_size_estimate * XEC)
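 
     # Worked example (assumed numbers: relayfee of 10 XEC/kB, XEC = 100
     # satoshis): a tx with a billable size of 200 bytes and one unsigned
     # input is estimated at 200 + 107 = 307 bytes, giving a fee of
     # int(10 / 1000 * 307 * 100) = 307 sats.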
 
     def calculate_fee_from_txid(self, txid):
         ctx = FromHex(CTransaction(), self.getrawtransaction(txid))
         return self.calculate_fee(ctx)
 
     def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
         """Add a p2p connection to the node.
 
         This method adds the p2p connection to the self.p2ps list and also
         returns the connection to the caller."""
         if 'dstport' not in kwargs:
             kwargs['dstport'] = p2p_port(self.index)
         if 'dstaddr' not in kwargs:
             kwargs['dstaddr'] = '127.0.0.1'
 
         p2p_conn.peer_connect(
             **kwargs,
             net=self.chain,
             timeout_factor=self.timeout_factor)()
         self.p2ps.append(p2p_conn)
         p2p_conn.wait_until(
             lambda: p2p_conn.is_connected,
             check_connected=False)
         if wait_for_verack:
             # Wait for the node to send us the version and verack
             p2p_conn.wait_for_verack()
             # At this point we have sent our version message and received the version and verack, however the full node
             # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
             # established (fSuccessfullyConnected).
             #
             # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
             # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
             # transaction that will be added to the mempool as soon as we return here.
             #
             # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
             # in comparison to the upside of making tests less fragile and
             # unexpected intermittent errors less likely.
             p2p_conn.sync_with_ping()
 
         return p2p_conn
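 
     # Illustrative usage (assumes the framework's P2PInterface class from
     # p2p.py):
     #     peer = node.add_p2p_connection(P2PInterface())
     # After this returns, the connection is verack- and ping-synced, so
     # messages sent through `peer` are not racing connection setup.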
 
     @property
     def p2p(self):
         """Return the first p2p connection
 
         Convenience property - most tests only use a single p2p connection to each
         node, so this saves having to write node.p2ps[0] many times."""
         assert self.p2ps, self._node_msg("No p2p connection")
         return self.p2ps[0]
 
     def num_test_p2p_connections(self):
         """Return number of test framework p2p connections to the node."""
         return len([peer for peer in self.getpeerinfo()
                     if peer['subver'] == MY_SUBVERSION])
 
     def disconnect_p2ps(self):
         """Close all p2p connections to the node."""
         for p in self.p2ps:
             p.peer_disconnect()
         del self.p2ps[:]
         wait_until_helper(lambda: self.num_test_p2p_connections() == 0,
                           timeout_factor=self.timeout_factor)
 
 
 class TestNodeCLIAttr:
     def __init__(self, cli, command):
         self.cli = cli
         self.command = command
 
     def __call__(self, *args, **kwargs):
         return self.cli.send_cli(self.command, *args, **kwargs)
 
     def get_request(self, *args, **kwargs):
         return lambda: self(*args, **kwargs)
 
 
 def arg_to_cli(arg):
     if isinstance(arg, bool):
         return str(arg).lower()
     elif arg is None:
         return 'null'
     elif isinstance(arg, dict) or isinstance(arg, list):
         return json.dumps(arg, default=EncodeDecimal)
     else:
         return str(arg)
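 
 # Illustrative mappings: True -> "true", None -> "null", 42 -> "42", and
 # {"fee": Decimal("0.01")} -> '{"fee": "0.01"}' (Decimals are stringified
 # by EncodeDecimal).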
 
 
 class TestNodeCLI():
     """Interface to bitcoin-cli for an individual node"""
 
     def __init__(self, binary, datadir, emulator=None):
         self.options = []
         self.binary = binary
         self.datadir = datadir
         self.input = None
         self.log = logging.getLogger('TestFramework.bitcoincli')
         self.emulator = emulator
 
     def __call__(self, *options, input=None):
         # TestNodeCLI is callable with bitcoin-cli command-line options
         cli = TestNodeCLI(self.binary, self.datadir, self.emulator)
         cli.options = [str(o) for o in options]
         cli.input = input
         return cli
 
     def __getattr__(self, command):
         return TestNodeCLIAttr(self, command)
 
     def batch(self, requests):
         results = []
         for request in requests:
             try:
                 results.append(dict(result=request()))
             except JSONRPCException as e:
                 results.append(dict(error=e))
         return results
 
     def send_cli(self, command=None, *args, **kwargs):
         """Run bitcoin-cli command. Deserializes returned string as python object."""
         pos_args = [arg_to_cli(arg) for arg in args]
         named_args = [str(key) + "=" + arg_to_cli(value)
                       for (key, value) in kwargs.items()]
         assert not (
             pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
         p_args = [self.binary, "-datadir=" + self.datadir] + self.options
         if named_args:
             p_args += ["-named"]
         if command is not None:
             p_args += [command]
         p_args += pos_args + named_args
         self.log.debug("Running bitcoin-cli {}".format(p_args[2:]))
         if self.emulator is not None:
             p_args = [self.emulator] + p_args
         process = subprocess.Popen(p_args, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
         cli_stdout, cli_stderr = process.communicate(input=self.input)
         returncode = process.poll()
         if returncode:
             match = re.match(
                 r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
             if match:
                 code, message = match.groups()
                 raise JSONRPCException(dict(code=int(code), message=message))
             # Ignore cli_stdout, raise with cli_stderr
             raise subprocess.CalledProcessError(
                 returncode, self.binary, output=cli_stderr)
         try:
             return json.loads(cli_stdout, parse_float=decimal.Decimal)
         except (json.JSONDecodeError, decimal.InvalidOperation):
             return cli_stdout.rstrip("\n")
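 
 # Illustrative usage (hypothetical wallet name): node.cli is a TestNodeCLI, so
 #     node.cli("-rpcwallet=w1").getbalance()
 # runs `bitcoin-cli -datadir=<datadir> -rpcwallet=w1 getbalance` and parses
 # the JSON output with Decimal floats.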
 
 
 class RPCOverloadWrapper():
     def __init__(self, rpc, cli=False, descriptors=False):
         self.rpc = rpc
         self.is_cli = cli
         # FIXME: self.descriptors and createwallet are supposed to be
         #  introduced by PR16528 but it will take more time to backport it,
         #  so this is added now to be able to progress on other backports.
         #  For now, descriptors is always False
         self.descriptors = descriptors
 
     def __getattr__(self, name):
         return getattr(self.rpc, name)
 
     def createwallet(self, wallet_name, disable_private_keys=None, blank=None,
                      passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None):
         if descriptors is None:
             descriptors = self.descriptors
         return self.__getattr__('createwallet')(
             wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup)
 
     def importprivkey(self, privkey, label=None, rescan=None):
         wallet_info = self.getwalletinfo()
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('importprivkey')(privkey, label, rescan)
         desc = descsum_create('combo(' + privkey + ')')
         req = [{
             'desc': desc,
             'timestamp': 0 if rescan else 'now',
             'label': label if label else ''
         }]
         import_res = self.importdescriptors(req)
         if not import_res[0]['success']:
             raise JSONRPCException(import_res[0]['error'])
 
     def addmultisigaddress(self, nrequired, keys,
                            label=None):
         wallet_info = self.getwalletinfo()
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('addmultisigaddress')(
                 nrequired, keys, label)
         cms = self.createmultisig(nrequired, keys)
         req = [{
             'desc': cms['descriptor'],
             'timestamp': 0,
             'label': label if label else ''
         }]
         import_res = self.importdescriptors(req)
         if not import_res[0]['success']:
             raise JSONRPCException(import_res[0]['error'])
         return cms
 
     def importpubkey(self, pubkey, label=None, rescan=None):
         wallet_info = self.getwalletinfo()
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('importpubkey')(pubkey, label, rescan)
         desc = descsum_create('combo(' + pubkey + ')')
         req = [{
             'desc': desc,
             'timestamp': 0 if rescan else 'now',
             'label': label if label else ''
         }]
         import_res = self.importdescriptors(req)
         if not import_res[0]['success']:
             raise JSONRPCException(import_res[0]['error'])
 
     def importaddress(self, address, label=None, rescan=None, p2sh=None):
         wallet_info = self.getwalletinfo()
         if 'descriptors' not in wallet_info or (
                 'descriptors' in wallet_info and not wallet_info['descriptors']):
             return self.__getattr__('importaddress')(
                 address, label, rescan, p2sh)
         is_hex = False
         try:
             int(address, 16)
             is_hex = True
             desc = descsum_create('raw(' + address + ')')
         except BaseException:
             desc = descsum_create('addr(' + address + ')')
         reqs = [{
             'desc': desc,
             'timestamp': 0 if rescan else 'now',
             'label': label if label else ''
         }]
         if is_hex and p2sh:
             reqs.append({
                 'desc': descsum_create('p2sh(raw(' + address + '))'),
                 'timestamp': 0 if rescan else 'now',
                 'label': label if label else ''
             })
         import_res = self.importdescriptors(reqs)
         for res in import_res:
             if not res['success']:
                 raise JSONRPCException(res['error'])
diff --git a/test/functional/test_framework/txtools.py b/test/functional/test_framework/txtools.py
index 09f154b84..2ae3b3d68 100644
--- a/test/functional/test_framework/txtools.py
+++ b/test/functional/test_framework/txtools.py
@@ -1,72 +1,72 @@
 #!/usr/bin/env python3
 import random
 
 from .cdefs import MAX_TXOUT_PUBKEY_SCRIPT, MIN_TX_SIZE
 from .messages import CTransaction, CTxOut, FromHex, ToHex
-from .script import CScript, OP_RETURN
+from .script import OP_RETURN, CScript
 
 
 def pad_tx(tx, pad_to_size=MIN_TX_SIZE):
     """
     Pad a transaction with op_return junk data until it is at least pad_to_size, or
     leave it alone if it's already bigger than that.
     """
     curr_size = len(tx.serialize())
     if curr_size >= pad_to_size:
         # Bail early; the txn is already big enough
         return
 
     # This code attempts to pad a transaction with OP_RETURN vouts such that
     # it will be exactly pad_to_size.  In order to do this we have to create
     # vouts of size x (maximum OP_RETURN size - vout overhead); the final
     # one subsumes any runoff which would be less than vout overhead.
     #
     # There are two cases where this is not possible:
     # 1. The transaction size is between pad_to_size and pad_to_size - extra_bytes
     # 2. The transaction is already greater than pad_to_size
     #
     # Visually:
     # | .. x  .. | .. x .. | .. x .. | .. x + desired_size % x |
     #    VOUT_1     VOUT_2    VOUT_3    VOUT_4
     # txout.value + txout.pk_script bytes + op_return
     extra_bytes = 8 + 1 + 1
     required_padding = pad_to_size - curr_size
     while required_padding > 0:
         # We need at least extra_bytes left over each time, or we can't
         # subsume the final (and possibly undersized) iteration of the loop
         padding_len = min(required_padding,
                           MAX_TXOUT_PUBKEY_SCRIPT - extra_bytes)
         assert padding_len >= 0, "Can't pad less than 0 bytes, trying {}".format(
             padding_len)
         # We will end up with less than one vout's worth of bytes after
         # this; add them to this txn
         next_iteration_padding = required_padding - padding_len - extra_bytes
         if next_iteration_padding > 0 and next_iteration_padding < extra_bytes:
             padding_len += next_iteration_padding
 
         # If we're at exactly, or below, extra_bytes we don't want 1 extra
         # byte of padding
         if padding_len <= extra_bytes:
             tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
         else:
             # Subtract the overhead for the TxOut
             padding_len -= extra_bytes
             padding = random.randrange(
                 1 << 8 * padding_len - 2, 1 << 8 * padding_len - 1)
             tx.vout.append(
                 CTxOut(0, CScript([OP_RETURN, padding])))
 
         curr_size = len(tx.serialize())
         required_padding = pad_to_size - curr_size
     assert curr_size >= pad_to_size, "{} !>= {}".format(curr_size, pad_to_size)
     tx.rehash()
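 
 # Illustrative usage (a minimal, assumed transaction):
 #     tx = CTransaction()
 #     tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
 #     pad_tx(tx)
 #     assert len(tx.serialize()) >= MIN_TX_SIZE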
 
 
 def pad_raw_tx(rawtx_hex, min_size=MIN_TX_SIZE):
     """
     Pad a raw transaction with OP_RETURN data until it reaches at least min_size
     """
     tx = CTransaction()
     FromHex(tx, rawtx_hex)
     pad_tx(tx, min_size)
     return ToHex(tx)
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index bcd0ea63d..c3f02df3d 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -1,644 +1,644 @@
 #!/usr/bin/env python3
 # Copyright (c) 2014-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Helpful routines for regression testing."""
 
-from base64 import b64encode
-from binascii import unhexlify
-from decimal import Decimal, ROUND_DOWN
-from io import BytesIO
-from subprocess import CalledProcessError
 import inspect
 import json
 import logging
 import os
 import random
 import re
 import time
 import unittest
+from base64 import b64encode
+from binascii import unhexlify
+from decimal import ROUND_DOWN, Decimal
+from io import BytesIO
+from subprocess import CalledProcessError
 
 from . import coverage
 from .authproxy import AuthServiceProxy, JSONRPCException
 
 logger = logging.getLogger("TestFramework.utils")
 
 # Assert functions
 ##################
 
 
 def assert_approx(v, vexp, vspan=10):
     """Assert that `v` is within `vspan` of `vexp`"""
     if v < vexp - vspan:
         raise AssertionError("{} < [{}..{}]".format(
             str(v), str(vexp - vspan), str(vexp + vspan)))
     if v > vexp + vspan:
         raise AssertionError("{} > [{}..{}]".format(
             str(v), str(vexp - vspan), str(vexp + vspan)))
 
 
 def assert_fee_amount(fee, tx_size, fee_per_kB, wiggleroom=2):
     """
     Assert the fee was in range
 
     wiggleroom defines an amount that the test expects the wallet to be off by
     when estimating fees.  This can be due to the dummy signature that is added
     during fee calculation, or due to the wallet funding transactions using the
     ceiling of the calculated fee.
     """
     target_fee = round(tx_size * fee_per_kB / 1000, 8)
     if fee < (tx_size - wiggleroom) * fee_per_kB / 1000:
         raise AssertionError(
             "Fee of {} XEC too low! (Should be {} XEC)".format(str(fee), str(target_fee)))
     if fee > (tx_size + wiggleroom) * fee_per_kB / 1000:
         raise AssertionError(
             "Fee of {} XEC too high! (Should be {} XEC)".format(str(fee), str(target_fee)))
 
 
 def assert_equal(thing1, thing2, *args):
     if thing1 != thing2 or any(thing1 != arg for arg in args):
         raise AssertionError("not({})".format(" == ".join(str(arg)
                                                           for arg in (thing1, thing2) + args)))
 
 
 def assert_greater_than(thing1, thing2):
     if thing1 <= thing2:
         raise AssertionError("{} <= {}".format(str(thing1), str(thing2)))
 
 
 def assert_greater_than_or_equal(thing1, thing2):
     if thing1 < thing2:
         raise AssertionError("{} < {}".format(str(thing1), str(thing2)))
 
 
 def assert_raises(exc, fun, *args, **kwds):
     assert_raises_message(exc, None, fun, *args, **kwds)
 
 
 def assert_raises_message(exc, message, fun, *args, **kwds):
     try:
         fun(*args, **kwds)
     except JSONRPCException:
         raise AssertionError(
             "Use assert_raises_rpc_error() to test RPC failures")
     except exc as e:
         if message is not None and message not in e.error['message']:
             raise AssertionError(
                 "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
                     message, e.error['message']))
     except Exception as e:
         raise AssertionError(
             "Unexpected exception raised: " + type(e).__name__)
     else:
         raise AssertionError("No exception raised")
 
 
 def assert_raises_process_error(returncode, output, fun, *args, **kwds):
     """Execute a process and asserts the process return code and output.
 
     Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
     and verifies that the return code and output are as expected. Throws AssertionError if
     no CalledProcessError was raised or if the return code and output are not as expected.
 
     Args:
         returncode (int): the process return code.
         output (string): [a substring of] the process output.
         fun (function): the function to call. This should execute a process.
         args*: positional arguments for the function.
         kwds**: named arguments for the function.
     """
     try:
         fun(*args, **kwds)
     except CalledProcessError as e:
         if returncode != e.returncode:
             raise AssertionError(
                 "Unexpected returncode {}".format(e.returncode))
         if output not in e.output:
             raise AssertionError("Expected substring not found:" + e.output)
     else:
         raise AssertionError("No exception raised")
 
 
 def assert_raises_rpc_error(code, message, fun, *args, **kwds):
     """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
 
     Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
     and verifies that the error code and message are as expected. Throws AssertionError if
     no JSONRPCException was raised or if the error code/message are not as expected.
 
     Args:
         code (int), optional: the error code returned by the RPC call (defined
             in src/rpc/protocol.h). Set to None if checking the error code is not required.
         message (string), optional: [a substring of] the error string returned by the
             RPC call. Set to None if checking the error string is not required.
         fun (function): the function to call. This should be the name of an RPC.
         args*: positional arguments for the function.
         kwds**: named arguments for the function.
     """
     assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
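 
 # Illustrative usage (the RPC, code and message are hypothetical):
 #     assert_raises_rpc_error(-8, "Block height out of range",
 #                             node.getblockhash, -1)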
 
 
 def try_rpc(code, message, fun, *args, **kwds):
     """Tries to run an rpc command.
 
     Test against error code and message if the rpc fails.
     Returns whether a JSONRPCException was raised."""
     try:
         fun(*args, **kwds)
     except JSONRPCException as e:
         # JSONRPCException was thrown as expected. Check the code and message
         # values are correct.
         if (code is not None) and (code != e.error["code"]):
             raise AssertionError(
                 "Unexpected JSONRPC error code {}".format(e.error["code"]))
         if (message is not None) and (message not in e.error['message']):
             raise AssertionError(
                 "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
                     message, e.error['message']))
         return True
     except Exception as e:
         raise AssertionError(
             "Unexpected exception raised: " + type(e).__name__)
     else:
         return False
 
 
 def assert_is_hex_string(string):
     try:
         int(string, 16)
     except Exception as e:
         raise AssertionError(
             "Couldn't interpret {!r} as hexadecimal; raised: {}".format(string, e))
 
 
 def assert_is_hash_string(string, length=64):
     if not isinstance(string, str):
         raise AssertionError(
             "Expected a string, got type {!r}".format(type(string)))
     elif length and len(string) != length:
         raise AssertionError(
             "String of length {} expected; got {}".format(length, len(string)))
     elif not re.match('[abcdef0-9]+$', string):
         raise AssertionError(
             "String {!r} contains invalid characters for a hash.".format(string))
 
 
 def assert_array_result(object_array, to_match, expected,
                         should_not_find=False):
     """
     Pass in array of JSON objects, a dictionary with key/value pairs
     to match against, and another dictionary with expected key/value
     pairs.
     If the should_not_find flag is true, to_match should not be found
     in object_array
     """
     if should_not_find:
         assert_equal(expected, {})
     num_matched = 0
     for item in object_array:
         all_match = True
         for key, value in to_match.items():
             if item[key] != value:
                 all_match = False
         if not all_match:
             continue
         elif should_not_find:
             num_matched = num_matched + 1
         for key, value in expected.items():
             if item[key] != value:
                 raise AssertionError("{} : expected {}={}".format(
                     str(item), str(key), str(value)))
             num_matched = num_matched + 1
     if num_matched == 0 and not should_not_find:
         raise AssertionError("No objects matched {}".format(str(to_match)))
     if num_matched > 0 and should_not_find:
         raise AssertionError("Objects were found {}".format(str(to_match)))
 
 # Utility functions
 ###################
 
 
 def check_json_precision():
     """Make sure json library being used does not lose precision converting XEC
     values"""
     n = Decimal("20000000.00000003")
     satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
     if satoshis != 2000000000000003:
         raise RuntimeError("JSON encode/decode loses precision")
 
 
 def EncodeDecimal(o):
     if isinstance(o, Decimal):
         return str(o)
     raise TypeError(repr(o) + " is not JSON serializable")
 
 
 def count_bytes(hex_string):
     return len(bytearray.fromhex(hex_string))
 
 
 def hex_str_to_bytes(hex_str):
     return unhexlify(hex_str.encode('ascii'))
 
 
 def str_to_b64str(string):
     return b64encode(string.encode('utf-8')).decode('ascii')
 
 
 def satoshi_round(amount):
     return Decimal(amount).quantize(Decimal('0.01'), rounding=ROUND_DOWN)
 
 
 def wait_until_helper(predicate, *, attempts=float('inf'),
                       timeout=float('inf'), lock=None, timeout_factor=1.0):
     """Sleep until the predicate resolves to be True.
 
     Warning: Note that this method is not recommended to be used in tests as it is
     not aware of the context of the test framework. Using the `wait_until()` members
     from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is
     properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
     `p2p.py` has a preset lock.
     """
     if attempts == float('inf') and timeout == float('inf'):
         timeout = 60
     timeout = timeout * timeout_factor
     attempt = 0
     time_end = time.time() + timeout
 
     while attempt < attempts and time.time() < time_end:
         if lock:
             with lock:
                 if predicate():
                     return
         else:
             if predicate():
                 return
         attempt += 1
         time.sleep(0.05)
 
     # Print the cause of the timeout
     predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
     logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
     if attempt >= attempts:
         raise AssertionError("Predicate {} not true after {} attempts".format(
             predicate_source, attempts))
     elif time.time() >= time_end:
         raise AssertionError(
             "Predicate {} not true after {} seconds".format(predicate_source, timeout))
     raise RuntimeError('Unreachable')
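 
 # Illustrative usage (assumed peer count): wait until the node reports two
 # peers, with the framework's timeout scaling applied.
 #     wait_until_helper(lambda: len(node.getpeerinfo()) == 2,
 #                       timeout=30, timeout_factor=node.timeout_factor)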
 
 # RPC/P2P connection constants and functions
 ############################################
 
 
 # The maximum number of nodes a single test can spawn
 MAX_NODES = 12
 # Don't assign rpc or p2p ports lower than this (for example: 18333 is the
 # default testnet port)
 PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=20000))
 # The number of ports to "reserve" for p2p and rpc, each
 PORT_RANGE = 5000
 
 
 class PortSeed:
     # Must be initialized with a unique integer for each process
     n = None
 
 
 def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
     """
     Args:
         url (str): URL of the RPC server to call
         node_number (int): the node number (or id) that this calls to
 
     Kwargs:
         timeout (int): HTTP timeout in seconds
         coveragedir (str): Directory
 
     Returns:
         AuthServiceProxy: a convenience object for making RPC calls.
 
     """
     proxy_kwargs = {}
     if timeout is not None:
         proxy_kwargs['timeout'] = int(timeout)
 
     proxy = AuthServiceProxy(url, **proxy_kwargs)
     proxy.url = url  # store URL on proxy for info
 
     coverage_logfile = coverage.get_filename(
         coveragedir, node_number) if coveragedir else None
 
     return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
 
 
 def p2p_port(n):
     assert n <= MAX_NODES
     return PORT_MIN + n + \
         (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
 
 
 def rpc_port(n):
     return PORT_MIN + PORT_RANGE + n + \
         (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
 
 
 def rpc_url(datadir, chain, host, port):
     rpc_u, rpc_p = get_auth_cookie(datadir, chain)
     if host is None:
         host = '127.0.0.1'
     return "http://{}:{}@{}:{}".format(rpc_u, rpc_p, host, int(port))
 
 # Node functions
 ################
 
 
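# A hypothetical example: initialize_datadir(tmpdir, 0, 'regtest') with
# PortSeed.n == 1 creates <tmpdir>/node0/bitcoin.conf starting with:
#   regtest=1
#   [regtest]
#   port=20012
#   rpcport=25012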
 def initialize_datadir(dirname, n, chain):
     datadir = get_datadir_path(dirname, n)
     if not os.path.isdir(datadir):
         os.makedirs(datadir)
     # Translate chain name to config name
     if chain == 'testnet3':
         chain_name_conf_arg = 'testnet'
         chain_name_conf_section = 'test'
     else:
         chain_name_conf_arg = chain
         chain_name_conf_section = chain
     with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
         f.write("{}=1\n".format(chain_name_conf_arg))
         f.write("[{}]\n".format(chain_name_conf_section))
         f.write("port=" + str(p2p_port(n)) + "\n")
         f.write("rpcport=" + str(rpc_port(n)) + "\n")
         f.write("fallbackfee=200\n")
         f.write("server=1\n")
         f.write("keypool=1\n")
         f.write("discover=0\n")
         f.write("dnsseed=0\n")
         f.write("listenonion=0\n")
         f.write("usecashaddr=1\n")
         f.write("shrinkdebugfile=0\n")
         os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
         os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
     return datadir
 
 
 def get_datadir_path(dirname, n):
     return os.path.join(dirname, "node" + str(n))
 
 
 def append_config(datadir, options):
     with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
         for option in options:
             f.write(option + "\n")
 
 
 def get_auth_cookie(datadir, chain):
     user = None
     password = None
     if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
         with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
             for line in f:
                 if line.startswith("rpcuser="):
                     assert user is None  # Ensure that there is only one rpcuser line
                     user = line.split("=")[1].strip("\n")
                 if line.startswith("rpcpassword="):
                     assert password is None  # Ensure that there is only one rpcpassword line
                     password = line.split("=")[1].strip("\n")
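    # A cookie file, if present, takes precedence over any rpcuser/rpcpassword
    # values found in bitcoin.conf.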
     try:
         with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
             userpass = f.read()
             split_userpass = userpass.split(':')
             user = split_userpass[0]
             password = split_userpass[1]
     except OSError:
         pass
     if user is None or password is None:
         raise ValueError("No RPC credentials")
     return user, password
 
 
 # If a cookie file exists in the given datadir, delete it.
 def delete_cookie_file(datadir, chain):
     if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
         logger.debug("Deleting leftover cookie file")
         os.remove(os.path.join(datadir, chain, ".cookie"))
 
 
 def set_node_times(nodes, t):
     for node in nodes:
         node.setmocktime(t)
 
 
 def disconnect_nodes(from_node, to_node):
     def get_peer_ids():
         result = []
         for peer in from_node.getpeerinfo():
             if to_node.name in peer['subver']:
                 result.append(peer['id'])
         return result
 
     peer_ids = get_peer_ids()
     if not peer_ids:
         logger.warning(
             f"disconnect_nodes: {from_node.index} and {to_node.index} were not connected")
         return
     for peer_id in peer_ids:
         try:
             from_node.disconnectnode(nodeid=peer_id)
         except JSONRPCException as e:
             # If this node is disconnected between calculating the peer id
             # and issuing the disconnect, don't worry about it.
             # This avoids a race condition if we're mass-disconnecting peers.
             if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                 raise
 
     # wait to disconnect
     wait_until_helper(lambda: not get_peer_ids(), timeout=5)
 
 
 def connect_nodes(from_node, to_node):
     host = to_node.host
     if host is None:
         host = '127.0.0.1'
     ip_port = host + ':' + str(to_node.p2p_port)
     from_node.addnode(ip_port, "onetry")
     # poll until version handshake complete to avoid race conditions
     # with transaction relaying
     # See comments in net_processing:
     # * Must have a version message before anything else
     # * Must have a verack message before anything else
     wait_until_helper(
         lambda: all(peer['version'] != 0
                     for peer in from_node.getpeerinfo()))
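    # verack carries no payload, so 24 bytes received for it is exactly one
    # complete message header (magic + command + length + checksum).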
     wait_until_helper(
         lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24
                     for peer in from_node.getpeerinfo()))
 
 
 # Transaction/Block functions
 #############################
 
 
 def find_output(node, txid, amount, *, blockhash=None):
     """
    Return the index of the output of txid with value amount.
    Raises an exception if there is none.
     """
     txdata = node.getrawtransaction(txid, 1, blockhash)
     for i in range(len(txdata["vout"])):
         if txdata["vout"][i]["value"] == amount:
             return i
     raise RuntimeError("find_output txid {} : {} not found".format(
         txid, str(amount)))
 
 
 def gather_inputs(from_node, amount_needed, confirmations_required=1):
     """
     Return a random set of unspent txouts that are enough to pay amount_needed
     """
     assert confirmations_required >= 0
     utxo = from_node.listunspent(confirmations_required)
     random.shuffle(utxo)
     inputs = []
     total_in = Decimal("0.00000000")
     while total_in < amount_needed and len(utxo) > 0:
         t = utxo.pop()
         total_in += t["amount"]
         inputs.append(
             {"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
     if total_in < amount_needed:
         raise RuntimeError("Insufficient funds: need {}, have {}".format(
             amount_needed, total_in))
     return (total_in, inputs)
 
 
 def make_change(from_node, amount_in, amount_out, fee):
     """
     Create change output(s), return them
     """
     outputs = {}
     amount = amount_out + fee
     change = amount_in - amount
     if change > amount * 2:
         # Create an extra change output to break up big inputs
         change_address = from_node.getnewaddress()
         # Split change in two, being careful of rounding:
         outputs[change_address] = Decimal(
             change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
         change = amount_in - amount - outputs[change_address]
     if change > 0:
         outputs[from_node.getnewaddress()] = change
     return outputs
 
 
 def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
     """
     Create a random transaction.
     Returns (txid, hex-encoded-transaction-data, fee)
     """
     from_node = random.choice(nodes)
     to_node = random.choice(nodes)
     fee = min_fee + fee_increment * random.randint(0, fee_variants)
 
     (total_in, inputs) = gather_inputs(from_node, amount + fee)
     outputs = make_change(from_node, total_in, amount, fee)
     outputs[to_node.getnewaddress()] = float(amount)
 
     rawtx = from_node.createrawtransaction(inputs, outputs)
     signresult = from_node.signrawtransactionwithwallet(rawtx)
     txid = from_node.sendrawtransaction(signresult["hex"], 0)
 
     return (txid, signresult["hex"], fee)
 
 # Create large OP_RETURN txouts that can be appended to a transaction
 # to make it large (helper for constructing large transactions).
 
 
 def gen_return_txouts():
    # Some pre-processing to create a bunch of OP_RETURN txouts to insert into
    # transactions we create, so we have big transactions (and therefore can't
    # fit very many into each block).
    # Create one script_pubkey.
    script_pubkey = "6a4d0200"  # OP_RETURN OP_PUSHDATA2 512 bytes
     for _ in range(512):
         script_pubkey = script_pubkey + "01"
     # concatenate 128 txouts of above script_pubkey which we'll insert before
     # the txout for change
     txouts = []
     from .messages import CTxOut
     txout = CTxOut()
     txout.nValue = 0
     txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
     for _ in range(128):
         txouts.append(txout)
     return txouts
 
 # Create a spend of each passed-in utxo, splicing in "txouts" to each raw
 # transaction to make it large.  See gen_return_txouts() above.
 
 
 def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
     addr = node.getnewaddress()
     txids = []
     from .messages import CTransaction
     for _ in range(num):
         t = utxos.pop()
         inputs = [{"txid": t["txid"], "vout": t["vout"]}]
         outputs = {}
         change = t['amount'] - fee
         outputs[addr] = satoshi_round(change)
         rawtx = node.createrawtransaction(inputs, outputs)
         tx = CTransaction()
         tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
         for txout in txouts:
             tx.vout.append(txout)
         newtx = tx.serialize().hex()
         signresult = node.signrawtransactionwithwallet(
             newtx, None, "NONE|FORKID")
         txid = node.sendrawtransaction(signresult["hex"], 0)
         txids.append(txid)
     return txids
 
 
 def find_vout_for_address(node, txid, addr):
     """
    Locate the vout index of the given transaction sending to the
    given address. Raises a RuntimeError if not found.
     """
     tx = node.getrawtransaction(txid, True)
     for i in range(len(tx["vout"])):
         if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
             return i
     raise RuntimeError(
         "Vout not found for address: txid={}, addr={}".format(txid, addr))
 
 
 def modinv(a, n):
     """Compute the modular inverse of a modulo n using the extended Euclidean
     Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
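
    For example, modinv(3, 7) == 5, since (3 * 5) % 7 == 1. Returns None
    when a has no inverse modulo n (i.e. when gcd(a, n) != 1).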
     """
     # TODO: Change to pow(a, -1, n) available in Python 3.8
     t1, t2 = 0, 1
     r1, r2 = n, a
     while r2 != 0:
         q = r1 // r2
         t1, t2 = t2, t1 - q * t2
         r1, r2 = r2, r1 - q * r2
     if r1 > 1:
         return None
     if t1 < 0:
         t1 += n
     return t1
 
 
 class TestFrameworkUtil(unittest.TestCase):
     def test_modinv(self):
         test_vectors = [
             [7, 11],
             [11, 29],
             [90, 13],
             [1891, 3797],
             [6003722857, 77695236973],
         ]
 
         for a, n in test_vectors:
             self.assertEqual(modinv(a, n), pow(a, n - 2, n))
diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py
index d0cd0eb4e..29850f39a 100755
--- a/test/functional/test_framework/wallet_util.py
+++ b/test/functional/test_framework/wallet_util.py
@@ -1,114 +1,110 @@
 #!/usr/bin/env python3
 # Copyright (c) 2018 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Useful util functions for testing the wallet"""
 from collections import namedtuple
 
-from test_framework.address import (
-    byte_to_base58,
-    key_to_p2pkh,
-    script_to_p2sh,
-)
+from test_framework.address import byte_to_base58, key_to_p2pkh, script_to_p2sh
 from test_framework.key import ECKey
 from test_framework.script import (
-    CScript,
     OP_2,
     OP_3,
     OP_CHECKMULTISIG,
     OP_CHECKSIG,
     OP_DUP,
     OP_EQUAL,
     OP_EQUALVERIFY,
     OP_HASH160,
+    CScript,
     hash160,
 )
 from test_framework.util import hex_str_to_bytes
 
 Key = namedtuple('Key', ['privkey',
                          'pubkey',
                          'p2pkh_script',
                          'p2pkh_addr'])
 
 Multisig = namedtuple('Multisig', ['privkeys',
                                    'pubkeys',
                                    'p2sh_script',
                                    'p2sh_addr',
                                    'redeem_script'])
 
 
 def get_key(node):
     """Generate a fresh key on node
 
     Returns a named tuple of privkey, pubkey and all address and scripts."""
     addr = node.getnewaddress()
     pubkey = node.getaddressinfo(addr)['pubkey']
     pkh = hash160(hex_str_to_bytes(pubkey))
     return Key(privkey=node.dumpprivkey(addr),
                pubkey=pubkey,
                p2pkh_script=CScript(
                    [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
                p2pkh_addr=key_to_p2pkh(pubkey))
 
 
 def get_generate_key():
     """Generate a fresh key
 
     Returns a named tuple of privkey, pubkey and all address and scripts."""
     eckey = ECKey()
     eckey.generate()
     privkey = bytes_to_wif(eckey.get_bytes())
     pubkey = eckey.get_pubkey().get_bytes().hex()
     pkh = hash160(hex_str_to_bytes(pubkey))
     return Key(privkey=privkey,
                pubkey=pubkey,
                p2pkh_script=CScript(
                    [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
                p2pkh_addr=key_to_p2pkh(pubkey))
 
 
 def get_multisig(node):
     """Generate a fresh 2-of-3 multisig on node
 
     Returns a named tuple of privkeys, pubkeys and all address and scripts."""
     addrs = []
     pubkeys = []
     for _ in range(3):
         addr = node.getaddressinfo(node.getnewaddress())
         addrs.append(addr['address'])
         pubkeys.append(addr['pubkey'])
     script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey)
                                     for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
     return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
                     pubkeys=pubkeys,
                     p2sh_script=CScript(
                         [OP_HASH160, hash160(script_code), OP_EQUAL]).hex(),
                     p2sh_addr=script_to_p2sh(script_code),
                     redeem_script=script_code.hex())
 
 
 def test_address(node, address, **kwargs):
     """Get address info for `address` and test whether the returned values are as expected."""
     addr_info = node.getaddressinfo(address)
     for key, value in kwargs.items():
         if value is None:
             if key in addr_info.keys():
                 raise AssertionError(
                     "key {} unexpectedly returned in getaddressinfo.".format(key))
         elif addr_info[key] != value:
             raise AssertionError(
                 "key {} value {} did not match expected value {}".format(
                     key, addr_info[key], value))
 
 
 def bytes_to_wif(b, compressed=True):
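    # 239 (0xef) is the WIF version byte for testnet/regtest private keys;
    # a trailing 0x01 marks the key as corresponding to a compressed pubkey.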
     if compressed:
         b += b'\x01'
     return byte_to_base58(b, 239)
 
 
 def generate_wif_key():
     # Makes a WIF privkey for imports
     k = ECKey()
     k.generate()
     return bytes_to_wif(k.get_bytes(), k.is_compressed)
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index c5e319b8e..d29b9fc57 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -1,902 +1,902 @@
 #!/usr/bin/env python3
 # Copyright (c) 2014-2019 The Bitcoin Core developers
 # Copyright (c) 2017 The Bitcoin developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Run regression test suite.
 
 This module calls down into individual test cases via subprocess. It will
 forward all unrecognized arguments onto the individual test scripts.
 
 For a description of arguments recognized by test scripts, see
 `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
 
 """
 
 import argparse
-from collections import deque
 import configparser
 import datetime
+import json
+import logging
+import multiprocessing
 import os
-import time
+import re
 import shutil
-import sys
 import subprocess
+import sys
 import tempfile
-import re
-import logging
-import xml.etree.ElementTree as ET
-import json
 import threading
-import multiprocessing
-from queue import Queue, Empty
+import time
 import unittest
+import xml.etree.ElementTree as ET
+from collections import deque
+from queue import Empty, Queue
 
 # Formatting. Default colors to empty strings.
 BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
 try:
     # Make sure python thinks it can write unicode to its stdout
     "\u2713".encode("utf_8").decode(sys.stdout.encoding)
     TICK = "✓ "
     CROSS = "✖ "
     CIRCLE = "○ "
 except UnicodeDecodeError:
     TICK = "P "
     CROSS = "x "
     CIRCLE = "o "
 
 if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):  # type: ignore
     if os.name == 'nt':
         import ctypes
         kernel32 = ctypes.windll.kernel32  # type: ignore
         ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
         STD_OUTPUT_HANDLE = -11
         STD_ERROR_HANDLE = -12
        # Enable ANSI color control on stdout
         stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
         stdout_mode = ctypes.c_int32()
         kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
         kernel32.SetConsoleMode(
             stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        # Enable ANSI color control on stderr
         stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
         stderr_mode = ctypes.c_int32()
         kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
         kernel32.SetConsoleMode(
             stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
     # primitive formatting on supported
     # terminal via ANSI escape sequences:
     BOLD = ('\033[0m', '\033[1m')
     GREEN = ('\033[0m', '\033[0;32m')
     RED = ('\033[0m', '\033[0;31m')
     GREY = ('\033[0m', '\033[1;30m')
 
 TEST_EXIT_PASSED = 0
 TEST_EXIT_SKIPPED = 77
 
 TEST_FRAMEWORK_MODULES = [
     "address",
     "blocktools",
     "messages",
     "muhash",
     "script",
     "util",
 ]
 
 NON_SCRIPTS = [
     # These are python files that live in the functional tests directory, but
     # are not test scripts.
     "combine_logs.py",
     "create_cache.py",
     "test_runner.py",
 ]
 
 TEST_PARAMS = {
    # Some tests can be run with additional parameters.
    # When a test is listed here, it will be run without parameters
    # as well as with the additional parameters listed here.
    # This:
    #    example "testName" : [["--param1", "--param2"] , ["--param3"]]
    # will run the test 3 times:
    #    testName
    #    testName --param1 --param2
    #    testName --param3
     "rpc_bind.py": [["--ipv4"], ["--ipv6"], ["--nonloopback"]],
     "rpc_deriveaddresses.py": [["--usecli"]],
     "wallet_txn_doublespend.py": [["--mineblock"]],
     "wallet_txn_clone.py": [["--mineblock"]],
     "wallet_createwallet.py": [["--usecli"]],
     "wallet_multiwallet.py": [["--usecli"]],
     "wallet_watchonly.py": [["--usecli"]],
 }
 
# Used to limit the number of tests when a list of tests is not provided on
# the command line. When --extended is specified, we run all tests; otherwise
# we only run a test if its execution time in seconds does not exceed the
# cutoff (DEFAULT_EXTENDED_CUTOFF by default).
 DEFAULT_EXTENDED_CUTOFF = 40
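# By default, run roughly one job per three available cores, with at least one job.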
 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
 
 
 class TestCase():
     """
     Data structure to hold and run information necessary to launch a test case.
     """
 
     def __init__(self, test_num, test_case, tests_dir,
                  tmpdir, failfast_event, flags=None):
         self.tests_dir = tests_dir
         self.tmpdir = tmpdir
         self.test_case = test_case
         self.test_num = test_num
         self.failfast_event = failfast_event
         self.flags = flags
 
     def run(self):
         if self.failfast_event.is_set():
             return TestResult(self.test_num, self.test_case,
                               "", "Skipped", 0, "", "")
 
         portseed = self.test_num
         portseed_arg = ["--portseed={}".format(portseed)]
         log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
         log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
         test_argv = self.test_case.split()
         testdir = os.path.join("{}", "{}_{}").format(
             self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
         tmpdir_arg = ["--tmpdir={}".format(testdir)]
         start_time = time.time()
         process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                    universal_newlines=True,
                                    stdout=log_stdout,
                                    stderr=log_stderr)
 
         process.wait()
         log_stdout.seek(0), log_stderr.seek(0)
         [stdout, stderr] = [log.read().decode('utf-8')
                             for log in (log_stdout, log_stderr)]
         log_stdout.close(), log_stderr.close()
         if process.returncode == TEST_EXIT_PASSED and stderr == "":
             status = "Passed"
         elif process.returncode == TEST_EXIT_SKIPPED:
             status = "Skipped"
         else:
             status = "Failed"
 
         return TestResult(self.test_num, self.test_case, testdir, status,
                           time.time() - start_time, stdout, stderr)
 
 
 def on_ci():
     return os.getenv('TRAVIS') == 'true' or os.getenv(
         'TEAMCITY_VERSION') is not None
 
 
 def main():
     # Read config generated by configure.
     config = configparser.ConfigParser()
     configfile = os.path.join(os.path.abspath(
         os.path.dirname(__file__)), "..", "config.ini")
     config.read_file(open(configfile, encoding="utf8"))
 
     src_dir = config["environment"]["SRCDIR"]
     build_dir = config["environment"]["BUILDDIR"]
     tests_dir = os.path.join(src_dir, 'test', 'functional')
 
     # SRCDIR must be set for cdefs.py to find and parse consensus.h
     os.environ["SRCDIR"] = src_dir
 
     # Parse arguments and pass through unrecognised args
     parser = argparse.ArgumentParser(add_help=False,
                                      usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                      description=__doc__,
                                      epilog='''
     Help text and arguments for individual test script:''',
                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n',
                         help='On failure, print a log (of length n lines) to '
                              'the console, combined from the test framework '
                              'and all test nodes.')
     parser.add_argument('--coverage', action='store_true',
                         help='generate a basic coverage report for the RPC interface')
     parser.add_argument(
         '--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
     parser.add_argument('--extended', action='store_true',
                         help='run the extended test suite in addition to the basic tests')
     parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
                         help='set the cutoff runtime for what tests get run')
     parser.add_argument('--help', '-h', '-?',
                         action='store_true', help='print help text and exit')
     parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
                         help='how many test scripts to run in parallel.')
     parser.add_argument('--keepcache', '-k', action='store_true',
                         help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
     parser.add_argument('--quiet', '-q', action='store_true',
                         help='only print results summary and failure logs')
     parser.add_argument('--tmpdirprefix', '-t',
                         default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs")
     parser.add_argument(
         '--failfast',
         action='store_true',
         help='stop execution after the first test failure')
     parser.add_argument('--junitoutput', '-J',
                         help="File that will store JUnit formatted test results. If no absolute path is given it is treated as relative to the temporary directory.")
     parser.add_argument('--testsuitename', '-n', default='Bitcoin ABC functional tests',
                         help="Name of the test suite, as it will appear in the logs and in the JUnit report.")
     args, unknown_args = parser.parse_known_args()
 
     # args to be passed on always start with two dashes; tests are the
     # remaining unknown args
     tests = [arg for arg in unknown_args if arg[:2] != "--"]
     passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
     passon_args.append("--configfile={}".format(configfile))
 
     # Set up logging
     logging_level = logging.INFO if args.quiet else logging.DEBUG
     logging.basicConfig(format='%(message)s', level=logging_level)
     logging.info("Starting {}".format(args.testsuitename))
 
     # Create base test directory
     tmpdir = os.path.join("{}", "test_runner_₿₵_🏃_{:%Y%m%d_%H%M%S}").format(
         args.tmpdirprefix, datetime.datetime.now())
 
     os.makedirs(tmpdir)
 
     logging.debug("Temporary test directory at {}".format(tmpdir))
 
     if args.junitoutput and not os.path.isabs(args.junitoutput):
         args.junitoutput = os.path.join(tmpdir, args.junitoutput)
 
     enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
 
     if not enable_bitcoind:
         print("No functional tests to run.")
         print("Rerun ./configure with --with-daemon and then make")
         sys.exit(0)
 
     # Build list of tests
     all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
 
     # Check all tests with parameters actually exist
     for test in TEST_PARAMS:
         if test not in all_scripts:
             print("ERROR: Test with parameter {} does not exist, check it has "
                   "not been renamed or deleted".format(test))
             sys.exit(1)
 
     if tests:
         # Individual tests have been specified. Run specified tests that exist
         # in the all_scripts list. Accept the name with or without .py
         # extension.
         individual_tests = [
             re.sub(r"\.py$", "", test) + ".py" for test in tests if not test.endswith('*')]
         test_list = []
         for test in individual_tests:
             if test in all_scripts:
                 test_list.append(test)
             else:
                 print("{}WARNING!{} Test '{}' not found in full test list.".format(
                     BOLD[1], BOLD[0], test))
 
         # Allow for wildcard at the end of the name, so a single input can
         # match multiple tests
         for test in tests:
             if test.endswith('*'):
                 test_list.extend(
                     [t for t in all_scripts if t.startswith(test[:-1])])
 
         # do not cut off explicitly specified tests
         cutoff = sys.maxsize
     else:
         # Run base tests only
         test_list = all_scripts
         cutoff = sys.maxsize if args.extended else args.cutoff
 
     # Remove the test cases that the user has explicitly asked to exclude.
     if args.exclude:
         exclude_tests = [re.sub(r"\.py$", "", test)
                          + (".py" if ".py" not in test else "") for test in args.exclude.split(',')]
         for exclude_test in exclude_tests:
             if exclude_test in test_list:
                 test_list.remove(exclude_test)
             else:
                 print("{}WARNING!{} Test '{}' not found in current test list.".format(
                     BOLD[1], BOLD[0], exclude_test))
 
     # Update timings from build_dir only if separate build directory is used.
     # We do not want to pollute source directory.
     build_timings = None
     if (src_dir != build_dir):
         build_timings = Timings(os.path.join(build_dir, 'timing.json'))
 
    # Always use timings from src_dir if present
     src_timings = Timings(os.path.join(
         src_dir, "test", "functional", 'timing.json'))
 
     # Add test parameters and remove long running tests if needed
     test_list = get_tests_to_run(
         test_list, TEST_PARAMS, cutoff, src_timings)
 
     if not test_list:
         print("No valid test scripts specified. Check that your test is in one "
               "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
         sys.exit(0)
 
     if args.help:
         # Print help for test_runner.py, then print help of the first script
         # and exit.
         parser.print_help()
         subprocess.check_call(
             [sys.executable, os.path.join(tests_dir, test_list[0]), '-h'])
         sys.exit(0)
 
     check_script_prefixes(all_scripts)
 
     if not args.keepcache:
         shutil.rmtree(os.path.join(build_dir, "test",
                                    "cache"), ignore_errors=True)
 
     run_tests(
         test_list,
         build_dir,
         tests_dir,
         args.junitoutput,
         tmpdir,
         num_jobs=args.jobs,
         test_suite_name=args.testsuitename,
         enable_coverage=args.coverage,
         args=passon_args,
         combined_logs_len=args.combinedlogslen,
         build_timings=build_timings,
         failfast=args.failfast
     )
 
 
 def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name,
               enable_coverage=False, args=None, combined_logs_len=0, build_timings=None, failfast=False):
     args = args or []
 
     # Warn if bitcoind is already running
     try:
         # pgrep exits with code zero when one or more matching processes found
         if subprocess.run(["pgrep", "-x", "bitcoind"],
                           stdout=subprocess.DEVNULL).returncode == 0:
             print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format(
                 BOLD[1], BOLD[0]))
     except OSError:
         # pgrep not supported
         pass
 
     # Warn if there is a cache directory
     cache_dir = os.path.join(build_dir, "test", "cache")
     if os.path.isdir(cache_dir):
         print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
             BOLD[1], BOLD[0], cache_dir))
 
     # Test Framework Tests
     print("Running Unit Tests for Test Framework Modules")
     test_framework_tests = unittest.TestSuite()
     for module in TEST_FRAMEWORK_MODULES:
         test_framework_tests.addTest(
             unittest.TestLoader().loadTestsFromName(
                 "test_framework.{}".format(module)))
     result = unittest.TextTestRunner(
         verbosity=1, failfast=True).run(test_framework_tests)
     if not result.wasSuccessful():
         logging.debug(
             "Early exiting after failure in TestFramework unit tests")
        sys.exit(1)
 
     flags = ['--cachedir={}'.format(cache_dir)] + args
 
     if enable_coverage:
         coverage = RPCCoverage()
         flags.append(coverage.flag)
         logging.debug(
             "Initializing coverage directory at {}".format(coverage.dir))
     else:
         coverage = None
 
     if len(test_list) > 1 and num_jobs > 1:
         # Populate cache
         try:
             subprocess.check_output([sys.executable, os.path.join(
                 tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
         except subprocess.CalledProcessError as e:
             sys.stdout.buffer.write(e.output)
             raise
 
     # Run Tests
     start_time = time.time()
     test_results = execute_test_processes(
         num_jobs, test_list, tests_dir, tmpdir, flags, failfast)
     runtime = time.time() - start_time
 
     max_len_name = len(max(test_list, key=len))
     print_results(test_results, tests_dir, max_len_name,
                   runtime, combined_logs_len)
 
     if junitoutput is not None:
         save_results_as_junit(
             test_results,
             junitoutput,
             runtime,
             test_suite_name)
 
     if (build_timings is not None):
         build_timings.save_timings(test_results)
 
     if coverage:
         coverage_passed = coverage.report_rpc_coverage()
 
         logging.debug("Cleaning up coverage data")
         coverage.cleanup()
     else:
         coverage_passed = True
 
     # Clear up the temp directory if all subdirectories are gone
     if not os.listdir(tmpdir):
         os.rmdir(tmpdir)
 
     all_passed = all(map(
         lambda test_result: test_result.was_successful, test_results)) and coverage_passed
 
     sys.exit(not all_passed)
 
 
 def execute_test_processes(
         num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False):
     update_queue = Queue()
     job_queue = Queue()
     failfast_event = threading.Event()
     test_results = []
     poll_timeout = 10  # seconds
 
     ##
     # Define some helper functions we will need for threading.
     ##
 
     def handle_message(message, running_jobs):
         """
         handle_message handles a single message from handle_test_cases
         """
         if isinstance(message, TestCase):
             running_jobs.append((message.test_num, message.test_case))
             print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
             return
 
         if isinstance(message, TestResult):
             test_result = message
 
             running_jobs.remove((test_result.num, test_result.name))
             test_results.append(test_result)
 
             if test_result.status == "Passed":
                 print("{}{}{} passed, Duration: {} s".format(
                     BOLD[1], test_result.name, BOLD[0], TimeResolution.seconds(test_result.time)))
             elif test_result.status == "Skipped":
                 print("{}{}{} skipped".format(
                     BOLD[1], test_result.name, BOLD[0]))
             else:
                 print("{}{}{} failed, Duration: {} s\n".format(
                     BOLD[1], test_result.name, BOLD[0], TimeResolution.seconds(test_result.time)))
                 print(BOLD[1] + 'stdout:' + BOLD[0])
                 print(test_result.stdout)
                 print(BOLD[1] + 'stderr:' + BOLD[0])
                 print(test_result.stderr)
 
                 if failfast:
                     logging.debug("Early exiting after test failure")
                     failfast_event.set()
             return
 
         assert False, "we should not be here"
 
     def handle_update_messages():
         """
         handle_update_messages waits for messages to be sent from handle_test_cases via the
         update_queue.  It serializes the results so we can print nice status update messages.
         """
         printed_status = False
         running_jobs = []
 
         while True:
             message = None
             try:
                 message = update_queue.get(True, poll_timeout)
                 if message is None:
                     break
 
                 # We printed a status message, need to kick to the next line
                 # before printing more.
                 if printed_status:
                     print()
                     printed_status = False
 
                 handle_message(message, running_jobs)
                 update_queue.task_done()
             except Empty:
                 if not on_ci():
                     print("Running jobs: {}".format(
                         ", ".join([j[1] for j in running_jobs])), end="\r")
                     sys.stdout.flush()
                     printed_status = True
 
     def handle_test_cases():
         """
        handle_test_cases represents a single thread that is part of a worker pool.
         It waits for a test, then executes that test.
         It also reports start and result messages to handle_update_messages
         """
         while True:
             test = job_queue.get()
             if test is None:
                 break
             # Signal that the test is starting to inform the poor waiting
             # programmer
             update_queue.put(test)
             result = test.run()
             update_queue.put(result)
             job_queue.task_done()
 
     ##
     # Setup our threads, and start sending tasks
     ##
 
     # Start our result collection thread.
     resultCollector = threading.Thread(target=handle_update_messages)
     resultCollector.daemon = True
     resultCollector.start()
 
     # Start some worker threads
     for job in range(num_jobs):
         t = threading.Thread(target=handle_test_cases)
         t.daemon = True
         t.start()
 
     # Push all our test cases into the job queue.
     for i, t in enumerate(test_list):
         job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags))
 
     # Wait for all the jobs to be completed
     job_queue.join()
 
     # Wait for all the results to be compiled
     update_queue.join()
 
     # Flush our queues so the threads exit
     update_queue.put(None)
     for job in range(num_jobs):
         job_queue.put(None)
 
     return test_results
 
 
 def print_results(test_results, tests_dir, max_len_name,
                   runtime, combined_logs_len):
     results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
         "TEST".ljust(max_len_name), "STATUS   ", "DURATION") + BOLD[0]
 
     test_results.sort(key=TestResult.sort_key)
     all_passed = True
     time_sum = 0
 
     for test_result in test_results:
         all_passed = all_passed and test_result.was_successful
         time_sum += test_result.time
         test_result.padding = max_len_name
         results += str(test_result)
 
         testdir = test_result.testdir
         if combined_logs_len and os.path.isdir(testdir):
             # Print the final `combinedlogslen` lines of the combined logs
             print('{}Combine the logs and print the last {} lines ...{}'.format(
                 BOLD[1], combined_logs_len, BOLD[0]))
             print('\n============')
             print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
             print('============\n')
             combined_logs_args = [
                 sys.executable, os.path.join(
                     tests_dir, 'combine_logs.py'), testdir]
 
            if BOLD[0]:
                combined_logs_args += ['--color']
            combined_logs, _ = subprocess.Popen(
                combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
             print(
                 "\n".join(
                     deque(
                         combined_logs.splitlines(),
                         combined_logs_len)))
 
     status = TICK + "Passed" if all_passed else CROSS + "Failed"
     if not all_passed:
         results += RED[1]
     results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
         "ALL".ljust(max_len_name), status.ljust(9), TimeResolution.seconds(time_sum)) + BOLD[0]
     if not all_passed:
         results += RED[0]
     results += "Runtime: {} s\n".format(TimeResolution.seconds(runtime))
     print(results)
 
 
 class TestResult():
     """
     Simple data structure to store test result values and print them properly
     """
 
     def __init__(self, num, name, testdir, status, time, stdout, stderr):
         self.num = num
         self.name = name
         self.testdir = testdir
         self.status = status
         self.time = time
         self.padding = 0
         self.stdout = stdout
         self.stderr = stderr
 
     def sort_key(self):
         if self.status == "Passed":
             return 0, self.name.lower()
         elif self.status == "Failed":
             return 2, self.name.lower()
         elif self.status == "Skipped":
             return 1, self.name.lower()
 
     def __repr__(self):
         if self.status == "Passed":
             color = GREEN
             glyph = TICK
         elif self.status == "Failed":
             color = RED
             glyph = CROSS
         elif self.status == "Skipped":
             color = GREY
             glyph = CIRCLE
 
         return color[1] + "{} | {}{} | {} s\n".format(
             self.name.ljust(self.padding), glyph, self.status.ljust(7), TimeResolution.seconds(self.time)) + color[0]
 
     @property
     def was_successful(self):
         return self.status != "Failed"
 
 
 def get_all_scripts_from_disk(test_dir, non_scripts):
     """
    Return all available test scripts from the script directory (excluding NON_SCRIPTS)
     """
     python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
     return list(python_files - set(non_scripts))
 
 
 def check_script_prefixes(all_scripts):
     """Check that no more than `EXPECTED_VIOLATION_COUNT` of the
        test scripts don't start with one of the allowed name prefixes."""
     EXPECTED_VIOLATION_COUNT = 16
 
     # LEEWAY is provided as a transition measure, so that pull-requests
     # that introduce new tests that don't conform with the naming
     # convention don't immediately cause the tests to fail.
     LEEWAY = 0
 
     good_prefixes_re = re.compile(
         "(abc_)?(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
     bad_script_names = [
         script for script in all_scripts if good_prefixes_re.match(script) is None]
 
     if len(bad_script_names) < EXPECTED_VIOLATION_COUNT:
         print(
             "{}HURRAY!{} Number of functional tests violating naming convention reduced!".format(
                 BOLD[1],
                 BOLD[0]))
         print("Consider reducing EXPECTED_VIOLATION_COUNT from {} to {}".format(
             EXPECTED_VIOLATION_COUNT, len(bad_script_names)))
     elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT:
         print(
             "INFO: {} tests not meeting naming conventions (expected {}):".format(len(bad_script_names), EXPECTED_VIOLATION_COUNT))
         print("  {}".format("\n  ".join(sorted(bad_script_names))))
         assert len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + \
             LEEWAY, "Too many tests not following naming convention! ({} found, expected: <= {})".format(
                 len(bad_script_names), EXPECTED_VIOLATION_COUNT)
 
 
 def get_tests_to_run(test_list, test_params, cutoff, src_timings):
     """
    Return only tests that will not run longer than cutoff.
    Long running tests are returned first to favor running tests in parallel.
    Timings from the build directory override those from the src directory.
     """
 
     def get_test_time(test):
        # Return 0 if the test is unknown, so that it always runs
         return next(
             (x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
 
     # Some tests must also be run with additional parameters. Add them to the
     # list.
     tests_with_params = []
     for test_name in test_list:
         # always execute a test without parameters
         tests_with_params.append(test_name)
         params = test_params.get(test_name)
         if params is not None:
             tests_with_params.extend(
                 [test_name + " " + " ".join(parameter) for parameter in params])
 
     result = [
         test for test in tests_with_params if get_test_time(test) <= cutoff]
     result.sort(key=lambda x: (-get_test_time(x), x))
     return result
 
 
 class RPCCoverage():
     """
     Coverage reporting utilities for test_runner.
 
     Coverage calculation works by having each test script subprocess write
     coverage files into a particular directory. These files contain the RPC
     commands invoked during testing, as well as a complete listing of RPC
     commands per `bitcoin-cli help` (`rpc_interface.txt`).
 
     After all tests complete, the commands run are combined and diff'd against
     the complete list to calculate uncovered RPC commands.
 
     See also: test/functional/test_framework/coverage.py
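
    Typical usage, mirroring run_tests() above:

        coverage = RPCCoverage()
        flags.append(coverage.flag)
        # ... run the test scripts with these flags ...
        coverage_passed = coverage.report_rpc_coverage()
        coverage.cleanup()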
 
     """
 
     def __init__(self):
         self.dir = tempfile.mkdtemp(prefix="coverage")
         self.flag = '--coveragedir={}'.format(self.dir)
 
     def report_rpc_coverage(self):
         """
         Print out RPC commands that were unexercised by tests.
 
         """
         uncovered = self._get_uncovered_rpc_commands()
 
         if uncovered:
             print("Uncovered RPC commands:")
             print("".join(("  - {}\n".format(i)) for i in sorted(uncovered)))
             return False
         else:
             print("All RPC commands covered.")
             return True
 
     def cleanup(self):
         return shutil.rmtree(self.dir)
 
     def _get_uncovered_rpc_commands(self):
         """
         Return a set of currently untested RPC commands.
 
         """
         # This is shared from `test/functional/test_framework/coverage.py`
         reference_filename = 'rpc_interface.txt'
         coverage_file_prefix = 'coverage.'
 
         coverage_ref_filename = os.path.join(self.dir, reference_filename)
         coverage_filenames = set()
         all_cmds = set()
         # Consider RPC generate covered, because it is overloaded in
         # test_framework/test_node.py and not seen by the coverage check.
         covered_cmds = set({'generate'})
 
         if not os.path.isfile(coverage_ref_filename):
             raise RuntimeError("No coverage reference found")
 
         with open(coverage_ref_filename, 'r', encoding="utf8") as file:
             all_cmds.update([line.strip() for line in file.readlines()])
 
         for root, _, files in os.walk(self.dir):
             for filename in files:
                 if filename.startswith(coverage_file_prefix):
                     coverage_filenames.add(os.path.join(root, filename))
 
         for filename in coverage_filenames:
             with open(filename, 'r', encoding="utf8") as file:
                 covered_cmds.update([line.strip()
                                      for line in file.readlines()])
 
         return all_cmds - covered_cmds
 
 
 def save_results_as_junit(test_results, file_name, time, test_suite_name):
     """
     Save tests results to file in JUnit format
 
     See http://llg.cubic.org/docs/junit/ for specification of format
     """
     e_test_suite = ET.Element("testsuite",
                               {"name": "{}".format(test_suite_name),
                                "tests": str(len(test_results)),
                                # "errors":
                                "failures": str(len([t for t in test_results if t.status == "Failed"])),
                                "id": "0",
                                "skipped": str(len([t for t in test_results if t.status == "Skipped"])),
                                "time": str(TimeResolution.milliseconds(time)),
                                "timestamp": datetime.datetime.now().isoformat('T')
                                })
 
     for test_result in test_results:
         e_test_case = ET.SubElement(e_test_suite, "testcase",
                                     {"name": test_result.name,
                                      "classname": test_result.name,
                                      "time": str(TimeResolution.milliseconds(test_result.time))
                                      }
                                     )
         if test_result.status == "Skipped":
             ET.SubElement(e_test_case, "skipped")
         elif test_result.status == "Failed":
             ET.SubElement(e_test_case, "failure")
         # no special element for passed tests
 
         ET.SubElement(e_test_case, "system-out").text = test_result.stdout
         ET.SubElement(e_test_case, "system-err").text = test_result.stderr
 
     ET.ElementTree(e_test_suite).write(
         file_name, "UTF-8", xml_declaration=True)
 
 
 class Timings():
     """
     Takes care of loading, merging and saving tests execution times.
     """
 
     def __init__(self, timing_file):
         self.timing_file = timing_file
         self.existing_timings = self.load_timings()
 
     def load_timings(self):
         if os.path.isfile(self.timing_file):
             with open(self.timing_file, encoding="utf8") as file:
                 return json.load(file)
         else:
             return []
 
     def get_merged_timings(self, new_timings):
         """
        Return a new list containing existing timings updated with new timings.
        Tests absent from the new timings are not removed.
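
        For example, merging new timings [{'name': 'a.py', 'time': 10}] into
        existing [{'name': 'a.py', 'time': 5}, {'name': 'b.py', 'time': 2}]
        yields [{'name': 'a.py', 'time': 10}, {'name': 'b.py', 'time': 2}].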
         """
 
         key = 'name'
         merged = {}
         for item in self.existing_timings + new_timings:
             if item[key] in merged:
                 merged[item[key]].update(item)
             else:
                 merged[item[key]] = item
 
         # Sort the result to preserve test ordering in file
         merged = list(merged.values())
         merged.sort(key=lambda t, key=key: t[key])
         return merged
 
     def save_timings(self, test_results):
        # We only save tests that have passed - timings for failed tests might
        # be wrong (timeouts or early fails)
         passed_results = [
             test for test in test_results if test.status == 'Passed']
         new_timings = list(map(lambda test: {'name': test.name, 'time': TimeResolution.seconds(test.time)},
                                passed_results))
         merged_timings = self.get_merged_timings(new_timings)
 
         with open(self.timing_file, 'w', encoding="utf8") as file:
             json.dump(merged_timings, file, indent=True)
 
 
 class TimeResolution:
     @staticmethod
     def seconds(time_fractional_second):
         return round(time_fractional_second)
 
     @staticmethod
     def milliseconds(time_fractional_second):
         return round(time_fractional_second, 3)
 
 
 if __name__ == '__main__':
     main()
diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py
index a0300c732..d30ec089d 100755
--- a/test/fuzz/test_runner.py
+++ b/test/fuzz/test_runner.py
@@ -1,300 +1,300 @@
 #!/usr/bin/env python3
 # Copyright (c) 2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Run fuzz test targets.
 """
 
 import argparse
-from concurrent.futures import ThreadPoolExecutor, as_completed
 import configparser
 import logging
 import os
 import subprocess
 import sys
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
 
 def main():
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description='''Run the fuzz targets with all inputs from the seed_dir once.''',
     )
     parser.add_argument(
         "-l",
         "--loglevel",
         dest="loglevel",
         default="INFO",
         help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
     )
     parser.add_argument(
         '--valgrind',
         action='store_true',
         help='If true, run fuzzing binaries under the valgrind memory error detector',
     )
     parser.add_argument(
         '-x',
         '--exclude',
         help="A comma-separated list of targets to exclude",
     )
     parser.add_argument(
         '--par',
         '-j',
         type=int,
         default=4,
         help='How many targets to merge or execute in parallel.',
     )
     parser.add_argument(
         'seed_dir',
         help='The seed corpus to run on (must contain subfolders for each fuzz target).',
     )
     parser.add_argument(
         'target',
         nargs='*',
         help='The target(s) to run. Default is to run all targets.',
     )
     parser.add_argument(
         '--m_dir',
         help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
     )
     parser.add_argument(
         '-g',
         '--generate',
         action='store_true',
         help='Create new corpus seeds (or extend the existing ones) by running'
              ' the given targets for a finite number of times. Outputs them to'
              ' the passed seed_dir.'
     )
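
    # Hypothetical invocations (paths and target names are placeholders):
    #   test/fuzz/test_runner.py ../qa-assets/fuzz_seed_corpus
    #   test/fuzz/test_runner.py --generate ../qa-assets/fuzz_seed_corpus process_message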
 
     args = parser.parse_args()
 
     # Set up logging
     logging.basicConfig(
         format='%(message)s',
         level=int(args.loglevel) if args.loglevel.isdigit(
         ) else args.loglevel.upper(),
     )
 
     # Read config generated by configure.
     config = configparser.ConfigParser()
     configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
     config.read_file(open(configfile, encoding="utf8"))
 
     if not config["components"].getboolean("ENABLE_FUZZ"):
         logging.error("Must have fuzz targets built")
         sys.exit(1)
     test_dir = os.path.join(
         config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz')
 
     # Build list of tests
     test_list_all = [
         f for f in os.listdir(test_dir)
         if os.path.isfile(os.path.join(test_dir, f)) and
         os.access(os.path.join(test_dir, f), os.X_OK)]
 
     if not test_list_all:
         logging.error("No fuzz targets found")
         sys.exit(1)
 
     logging.debug(
         "{} fuzz target(s) found: {}".format(
             len(test_list_all), " ".join(
                 sorted(test_list_all))))
 
     # By default run all
     args.target = args.target or test_list_all
     test_list_error = list(set(args.target).difference(set(test_list_all)))
     if test_list_error:
         logging.error(
             "Unknown fuzz targets selected: {}".format(test_list_error))
     test_list_selection = list(
         set(test_list_all).intersection(set(args.target)))
     if not test_list_selection:
         logging.error("No fuzz targets selected")
     if args.exclude:
         for excluded_target in args.exclude.split(","):
             if excluded_target not in test_list_selection:
                 logging.error(
                     "Target \"{}\" not found in current target list.".format(excluded_target))
                 continue
             test_list_selection.remove(excluded_target)
     test_list_selection.sort()
 
     logging.info(
         "{} of {} detected fuzz target(s) selected: {}".format(
             len(test_list_selection),
             len(test_list_all),
             " ".join(test_list_selection)))
 
     if not args.generate:
         test_list_seedless = []
         for t in test_list_selection:
             corpus_path = os.path.join(args.seed_dir, t)
             if not os.path.exists(corpus_path) or len(
                     os.listdir(corpus_path)) == 0:
                 test_list_seedless.append(t)
         test_list_seedless.sort()
         if test_list_seedless:
             logging.info(
                 "Fuzzing harnesses lacking a seed corpus: {}".format(
                     " ".join(test_list_seedless)
                 )
             )
             logging.info(
                 "Please consider adding a fuzz seed corpus at https://github.com/Bitcoin-ABC/qa-assets")
 
     try:
         help_output = subprocess.run(
             args=[
                 os.path.join(test_dir, test_list_selection[0]),
                 '-help=1',
             ],
             timeout=20,
             check=True,
             stderr=subprocess.PIPE,
             universal_newlines=True,
         ).stderr
         if "libFuzzer" not in help_output:
             logging.error("Must be built with libFuzzer")
             sys.exit(1)
     except subprocess.TimeoutExpired:
         logging.error(
             "subprocess timed out: Currently only libFuzzer is supported")
         sys.exit(1)
 
     with ThreadPoolExecutor(max_workers=args.par) as fuzz_pool:
         if args.generate:
             return generate_corpus_seeds(
                 fuzz_pool=fuzz_pool,
                 test_dir=test_dir,
                 seed_dir=args.seed_dir,
                 targets=test_list_selection,
             )
 
         if args.m_dir:
             merge_inputs(
                 fuzz_pool=fuzz_pool,
                 corpus=args.seed_dir,
                 test_list=test_list_selection,
                 test_dir=test_dir,
                 merge_dir=args.m_dir,
             )
             return
 
         run_once(
             fuzz_pool=fuzz_pool,
             corpus=args.seed_dir,
             test_list=test_list_selection,
             test_dir=test_dir,
             use_valgrind=args.valgrind,
         )
 
 
 def generate_corpus_seeds(*, fuzz_pool, test_dir, seed_dir, targets):
     """Generates new corpus seeds.
 
     Run {targets} without input and output the generated corpus seeds to
     {seed_dir}.
     """
     logging.info("Generating corpus seeds to {}".format(seed_dir))
 
     def job(command):
         logging.debug("Running '{}'\n".format(" ".join(command)))
         logging.debug("Command '{}' output:\n'{}'\n".format(
             ' '.join(command),
             subprocess.run(command, check=True, stderr=subprocess.PIPE,
                            universal_newlines=True).stderr
         ))
 
     futures = []
     for target in targets:
         target_seed_dir = os.path.join(seed_dir, target)
         os.makedirs(target_seed_dir, exist_ok=True)
         command = [
             os.path.join(test_dir, target),
             "-runs=100000",
             target_seed_dir,
         ]
         futures.append(fuzz_pool.submit(job, command))
 
     for future in as_completed(futures):
         future.result()
 
 
 def merge_inputs(*, fuzz_pool, corpus, test_list, test_dir, merge_dir):
     logging.info(
         "Merging the inputs from the passed dir into the seed dir. Passed dir: {}".format(merge_dir))
     jobs = []
     for t in test_list:
         args = [
             os.path.join(test_dir, t),
             '-merge=1',
             # Also done by oss-fuzz
             # https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
             '-use_value_profile=1',
             os.path.join(corpus, t),
             os.path.join(merge_dir, t),
         ]
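         # With -merge=1, libFuzzer copies any input from the trailing
         # directory that adds new coverage into the first (corpus) directory.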
         os.makedirs(os.path.join(corpus, t), exist_ok=True)
         os.makedirs(os.path.join(merge_dir, t), exist_ok=True)
 
         def job(t, args):
             output = 'Run {} with args {}\n'.format(t, " ".join(args))
             output += subprocess.run(args,
                                      check=True,
                                      stderr=subprocess.PIPE,
                                      universal_newlines=True).stderr
             logging.debug(output)
 
         jobs.append(fuzz_pool.submit(job, t, args))
 
     for future in as_completed(jobs):
         future.result()
 
 
 def run_once(*, fuzz_pool, corpus, test_list, test_dir, use_valgrind):
     jobs = []
     for t in test_list:
         corpus_path = os.path.join(corpus, t)
         os.makedirs(corpus_path, exist_ok=True)
         args = [
             os.path.join(test_dir, t),
             '-runs=1',
             corpus_path,
         ]
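         # -runs=1 makes libFuzzer execute the inputs already in the corpus
         # and then stop after at most one new run, so this is effectively a
         # quick regression pass over the seeds.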
         if use_valgrind:
             args = [
                 'valgrind',
                 '--quiet',
                 '--error-exitcode=1'] + args
 
         def job(t, args):
             output = 'Run {} with args {}\n'.format(t, args)
             result = subprocess.run(
                 args,
                 stderr=subprocess.PIPE,
                 universal_newlines=True)
             output += result.stderr
             return output, result
 
         jobs.append(fuzz_pool.submit(job, t, args))
 
     for future in as_completed(jobs):
         output, result = future.result()
         logging.debug(output)
         try:
             result.check_returncode()
         except subprocess.CalledProcessError as e:
             if e.stdout:
                 logging.info(e.stdout)
             if e.stderr:
                 logging.info(e.stderr)
             logging.info(
                 "Target \"{}\" failed with exit code {}".format(
                     " ".join(
                         result.args),
                     e.returncode))
             sys.exit(1)
 
 
 if __name__ == '__main__':
     main()
diff --git a/test/lint/check-doc.py b/test/lint/check-doc.py
index c44826981..ef7b50fbc 100755
--- a/test/lint/check-doc.py
+++ b/test/lint/check-doc.py
@@ -1,96 +1,96 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2019 The Bitcoin Core developers
 # Copyright (c) 2019 The Bitcoin developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 '''
 This checks if all command-line args are documented.
 The return value is 0 to indicate no error.
 
 Author: @MarcoFalke
 '''
 
-from subprocess import check_output
-from pprint import PrettyPrinter
 import glob
 import re
+from pprint import PrettyPrinter
+from subprocess import check_output
 
 TOP_LEVEL = 'git rev-parse --show-toplevel'
 FOLDER_SRC = '/src/**/'
 FOLDER_TEST = '/src/**/test/'
 
 EXTENSIONS = ["*.c", "*.h", "*.cpp", "*.cc", "*.hpp"]
 REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\(\s*"(-[^"]+)"'
 REGEX_DOC = r'AddArg\(\s*"(-[^"=]+?)(?:=|")'
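 # For example, REGEX_ARG captures "-datadir" from 'GetArg("-datadir", ...)' or
 # 'IsArgSet("-datadir")', and REGEX_DOC captures it from
 # 'AddArg("-datadir=<dir>", ...)'.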
 
 # list false positive unknown arguments
 SET_FALSE_POSITIVE_UNKNOWNS = set([
     '-includeconf',
     '-regtest',
     '-testnet',
     '-zmqpubhashblock',
     '-zmqpubhashtx',
     '-zmqpubrawblock',
     '-zmqpubrawtx',
     '-zmqpubhashblockhwm',
     '-zmqpubhashtxhwm',
     '-zmqpubrawblockhwm',
     '-zmqpubrawtxhwm',
 ])
 
 # list false positive undocumented arguments
 SET_FALSE_POSITIVE_UNDOCUMENTED = set([
     '-help',
     '-h',
     '-dbcrashratio',
     '-enableminerfund',
     '-forcecompactdb',
     '-parkdeepreorg',
     '-automaticunparking',
     # Removed arguments that now just print a helpful error message
     '-zapwallettxes',
     # Remove after November 2020 upgrade
     '-axionactivationtime',
     '-replayprotectionactivationtime',
 ])
 
 
 def main():
     top_level = check_output(TOP_LEVEL, shell=True,
                              universal_newlines=True, encoding='utf8').strip()
     source_files = []
     test_files = []
 
     for extension in EXTENSIONS:
         source_files += glob.glob(top_level +
                                   FOLDER_SRC + extension, recursive=True)
         test_files += glob.glob(top_level + FOLDER_TEST +
                                 extension, recursive=True)
 
     files = set(source_files) - set(test_files)
 
     args_used = set()
     args_docd = set()
     for file in files:
         with open(file, 'r', encoding='utf-8') as f:
             content = f.read()
             args_used |= set(re.findall(re.compile(REGEX_ARG), content))
             args_docd |= set(re.findall(re.compile(REGEX_DOC), content))
 
     args_used |= SET_FALSE_POSITIVE_UNKNOWNS
     args_docd |= SET_FALSE_POSITIVE_UNDOCUMENTED
     args_need_doc = args_used - args_docd
     args_unknown = args_docd - args_used
 
     pp = PrettyPrinter()
     print("Args used        : {}".format(len(args_used)))
     print("Args documented  : {}".format(len(args_docd)))
     print("Args undocumented: {}".format(len(args_need_doc)))
     pp.pprint(args_need_doc)
     print("Args unknown     : {}".format(len(args_unknown)))
     pp.pprint(args_unknown)
 
 
 if __name__ == "__main__":
     main()
diff --git a/test/lint/check-rpc-mappings.py b/test/lint/check-rpc-mappings.py
index 31608305b..66ab8bd9a 100755
--- a/test/lint/check-rpc-mappings.py
+++ b/test/lint/check-rpc-mappings.py
@@ -1,193 +1,193 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017-2018 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Check RPC argument consistency."""
 
-from collections import defaultdict
 import glob
 import os
 import re
 import sys
+from collections import defaultdict
 
 # Source files (relative to root) to scan for dispatch tables
 SOURCE_PATTERNS = [
     "src/rpc/*.cpp",
     "src/wallet/rpc*.cpp",
     "src/zmq/zmqrpc.cpp",
 ]
 # Source file (relative to root) containing conversion mapping
 SOURCE_CLIENT = 'src/rpc/client.cpp'
 # Argument names that should be ignored in consistency checks
 IGNORE_DUMMY_ARGS = {'dummy', 'arg0', 'arg1', 'arg2',
                      'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9'}
 
 
 class RPCCommand:
     def __init__(self, name, args):
         self.name = name
         self.args = args
 
 
 class RPCArgument:
     def __init__(self, names, idx):
         self.names = names
         self.idx = idx
         self.convert = False
 
 
 def parse_string(s):
     assert s[0] == '"'
     assert s[-1] == '"'
     return s[1:-1]
 
 
 def process_commands(fname):
     """Find and parse dispatch table in implementation file `fname`."""
     cmds = []
     in_rpcs = False
     with open(fname, "r", encoding="utf8") as f:
         for line in f:
             line = line.strip()
             if not in_rpcs:
                 if re.match(
                         r"static const CRPCCommand .*\[\] =", line):
                     in_rpcs = True
             else:
                 if line.startswith('};'):
                     in_rpcs = False
                 elif '{' in line and '"' in line:
                     m = re.search(
                         '{ *("[^"]*"), *("[^"]*"), *([^,]*), *{([^}]*)} *},', line)
                     assert m, 'No match to table expression: {}'.format(line)
                     name = parse_string(m.group(2))
                     args_str = m.group(4).strip()
                     if args_str:
                         args = [RPCArgument(parse_string(x.strip()).split(
                             '|'), idx) for idx, x in enumerate(args_str.split(','))]
                     else:
                         args = []
                     cmds.append(RPCCommand(name, args))
     assert not in_rpcs, "Something went wrong with parsing the C++ file: update the regexps"
     return cmds
 
 
 def process_mapping(fname):
     """Find and parse conversion table in implementation file `fname`."""
     cmds = []
     in_rpcs = False
     with open(fname, "r", encoding="utf8") as f:
         for line in f:
             line = line.strip()
             if not in_rpcs:
                 if line == 'static const CRPCConvertParam vRPCConvertParams[] = {':
                     in_rpcs = True
             else:
                 if line.startswith('};'):
                     in_rpcs = False
                 elif '{' in line and '"' in line:
                     m = re.search(
                         '{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},', line)
                     assert m, 'No match to table expression: {}'.format(line)
                     name = parse_string(m.group(1))
                     idx = int(m.group(2))
                     argname = parse_string(m.group(3))
                     cmds.append((name, idx, argname))
     assert not in_rpcs and cmds
     return cmds
 
 
 def main():
     if len(sys.argv) != 2:
         print('Usage: {} ROOT-DIR'.format(sys.argv[0]), file=sys.stderr)
         sys.exit(1)
 
     root = sys.argv[1]
 
     # Find the sources files
     sources = []
     for glob_regex in SOURCE_PATTERNS:
         sources.extend(glob.glob(os.path.join(root, glob_regex)))
 
     # Get all commands from dispatch tables
     cmds = []
     for fname in set(sources):
         cmds += process_commands(fname)
 
     cmds_by_name = {}
     for cmd in cmds:
         cmds_by_name[cmd.name] = cmd
 
     # Get current convert mapping for client
     client = SOURCE_CLIENT
     mapping = set(process_mapping(os.path.join(root, client)))
 
     print('* Checking consistency between dispatch tables and vRPCConvertParams')
 
     # Check mapping consistency
     errors = 0
     for (cmdname, argidx, argname) in mapping:
         try:
             rargnames = cmds_by_name[cmdname].args[argidx].names
         except (KeyError, IndexError):
             print('ERROR: {} argument {} (named {} in vRPCConvertParams) is not defined in dispatch table'.format(
                 cmdname, argidx, argname))
             errors += 1
             continue
         if argname not in rargnames:
             print('ERROR: {} argument {} is named {} in vRPCConvertParams but {} in dispatch table'.format(
                 cmdname, argidx, argname, rargnames), file=sys.stderr)
             errors += 1
 
     # Check for conflicts in vRPCConvertParams conversion
     # All aliases for an argument must either be present in the
     # conversion table, or not. Anything in between means an oversight
     # and some aliases won't work.
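     # For instance, if an argument is declared as "addr|address" and only
     # ("cmd", idx, "addr") appears in vRPCConvertParams, calls that use the
     # "address" alias would silently not be converted.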
     for cmd in cmds:
         for arg in cmd.args:
             convert = [((cmd.name, arg.idx, argname) in mapping)
                        for argname in arg.names]
             if any(convert) != all(convert):
                 print('ERROR: {} argument {} has conflicts in vRPCConvertParams conversion specifier {}'.format(
                     cmd.name, arg.names, convert))
                 errors += 1
             arg.convert = all(convert)
 
     # Check for conversion difference by argument name.
     # It is preferable for API consistency that arguments with the same name
     # have the same conversion, so bin by argument name.
     all_methods_by_argname = defaultdict(list)
     converts_by_argname = defaultdict(list)
     for cmd in cmds:
         for arg in cmd.args:
             for argname in arg.names:
                 all_methods_by_argname[argname].append(cmd.name)
                 converts_by_argname[argname].append(arg.convert)
 
     for argname, convert in converts_by_argname.items():
         if all(convert) != any(convert):
             if argname in IGNORE_DUMMY_ARGS:
                 # these are testing or dummy, don't warn for them
                 continue
             formattedCommands = []
             for cmd, arg_convert in zip(
                     all_methods_by_argname[argname], converts_by_argname[argname]):
                 argType = 'string'
                 if arg_convert:
                     argType = 'JSON'
                 formattedCommands.append(
                     "'{}' has argument '{}' of type '{}'".format(
                         cmd, argname, argType))
             print("WARNING: In order to keep a consistent API, arguments of the same name are expected to either both "
                   "be string-typed or converted from JSON. But there was a conversion mismatch: {}. "
                   "Common root causes for this warning: 1) The command and/or argument are missing from the conversion "
                   "table in '{}'. 2) Arguments of the same name are being converted from JSON for some commands, but not "
                   "for others. Consider renaming arguments such that one name is used for strings and the other for "
                   "conversions from JSON.".format(formattedCommands, SOURCE_CLIENT))
 
     sys.exit(0)
 
 
 if __name__ == '__main__':
     main()