diff --git a/qa/rpc-tests/abc-p2p-fullblocktest.py b/qa/rpc-tests/abc-p2p-fullblocktest.py
--- a/qa/rpc-tests/abc-p2p-fullblocktest.py
+++ b/qa/rpc-tests/abc-p2p-fullblocktest.py
@@ -21,6 +21,7 @@
 from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE,
                                   MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT)
 
+
 class PreviousSpendableOutput(object):
 
     def __init__(self, tx=CTransaction(), n=-1):
diff --git a/qa/rpc-tests/abc-rpc.py b/qa/rpc-tests/abc-rpc.py
--- a/qa/rpc-tests/abc-rpc.py
+++ b/qa/rpc-tests/abc-rpc.py
@@ -15,6 +15,7 @@
                                   LEGACY_MAX_BLOCK_SIZE,
                                   DEFAULT_MAX_BLOCK_SIZE)
 
+
 class ABC_RPC_Test (BitcoinTestFramework):
 
     def __init__(self):
diff --git a/qa/rpc-tests/assumevalid.py b/qa/rpc-tests/assumevalid.py
--- a/qa/rpc-tests/assumevalid.py
+++ b/qa/rpc-tests/assumevalid.py
@@ -16,7 +16,7 @@
     2-101:    bury that block with 100 blocks so the coinbase transaction
               output can be spent
     102:      a block containing a transaction spending the coinbase
-              transaction output. The transaction has an invalid signature. 
+              transaction output. The transaction has an invalid signature.
     103-2202: bury the bad block with just over two weeks' worth of blocks
               (2100 blocks)
 
@@ -38,7 +38,9 @@
 from test_framework.key import CECKey
 from test_framework.script import *
 
+
 class BaseNode(SingleNodeConnCB):
+
     def __init__(self):
         SingleNodeConnCB.__init__(self)
         self.last_inv = None
@@ -60,10 +62,12 @@
 
     def send_header_for_blocks(self, new_blocks):
         headers_message = msg_headers()
-        headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
+        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
         self.send_message(headers_message)
 
+
 class SendHeadersTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
@@ -81,15 +85,17 @@
         # Connect to node0
         node0 = BaseNode()
         connections = []
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
         node0.add_connection(connections[0])
 
-        NetworkThread().start() # Start up network handling in another thread
+        NetworkThread().start()  # Start up network handling in another thread
         node0.wait_for_verack()
 
         # Build the blockchain
         self.tip = int(self.nodes[0].getbestblockhash(), 16)
-        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
+        self.block_time = self.nodes[0].getblock(
+            self.nodes[0].getbestblockhash())['time'] + 1
 
         self.blocks = []
 
@@ -100,7 +106,8 @@
 
         # Create the first block with a coinbase output to our key
         height = 1
-        block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
+        block = create_block(self.tip, create_coinbase(
+            height, coinbase_pubkey), self.block_time)
         self.blocks.append(block)
         self.block_time += 1
         block.solve()
@@ -111,20 +118,24 @@
 
         # Bury the block 100 deep so the coinbase output is spendable
         for i in range(100):
-            block = create_block(self.tip, create_coinbase(height), self.block_time)
+            block = create_block(
+                self.tip, create_coinbase(height), self.block_time)
             block.solve()
             self.blocks.append(block)
             self.tip = block.sha256
             self.block_time += 1
             height += 1
 
-        # Create a transaction spending the coinbase output with an invalid (null) signature
+        # Create a transaction spending the coinbase output with an invalid
+        # (null) signature
         tx = CTransaction()
-        tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
-        tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
+        tx.vin.append(
+            CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
+        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
         tx.calc_sha256()
 
-        block102 = create_block(self.tip, create_coinbase(height), self.block_time)
+        block102 = create_block(
+            self.tip, create_coinbase(height), self.block_time)
         self.block_time += 1
         block102.vtx.extend([tx])
         block102.hashMerkleRoot = block102.calc_merkle_root()
@@ -137,7 +148,8 @@
 
         # Bury the assumed valid block 2100 deep
         for i in range(2100):
-            block = create_block(self.tip, create_coinbase(height), self.block_time)
+            block = create_block(
+                self.tip, create_coinbase(height), self.block_time)
             block.nVersion = 4
             block.solve()
             self.blocks.append(block)
@@ -145,18 +157,21 @@
             self.block_time += 1
             height += 1
 
-        # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
+        # Start node1 and node2 with assumevalid so they accept a block with a
+        # bad signature.
         self.nodes.append(start_node(1, self.options.tmpdir,
                                      ["-debug", "-assumevalid=" + hex(block102.sha256)]))
         node1 = BaseNode()  # connects to node1
-        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
         node1.add_connection(connections[1])
         node1.wait_for_verack()
 
         self.nodes.append(start_node(2, self.options.tmpdir,
                                      ["-debug", "-assumevalid=" + hex(block102.sha256)]))
         node2 = BaseNode()  # connects to node2
-        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
         node2.add_connection(connections[2])
         node2.wait_for_verack()
 
@@ -170,22 +185,25 @@
         # Send 102 blocks to node0. Block 102 will be rejected.
         for i in range(101):
             node0.send_message(msg_block(self.blocks[i]))
-        node0.sync_with_ping() # make sure the most recent block is synced
+        node0.sync_with_ping()  # make sure the most recent block is synced
         node0.send_message(msg_block(self.blocks[101]))
-        assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101)
+        assert_equal(self.nodes[0].getblock(
+            self.nodes[0].getbestblockhash())['height'], 101)
 
         # Send 3102 blocks to node1. All blocks will be accepted.
         for i in range(2202):
             node1.send_message(msg_block(self.blocks[i]))
-        node1.sync_with_ping() # make sure the most recent block is synced
-        assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
+        node1.sync_with_ping()  # make sure the most recent block is synced
+        assert_equal(self.nodes[1].getblock(
+            self.nodes[1].getbestblockhash())['height'], 2202)
 
         # Send 102 blocks to node2. Block 102 will be rejected.
         for i in range(101):
             node2.send_message(msg_block(self.blocks[i]))
-        node2.sync_with_ping() # make sure the most recent block is synced
+        node2.sync_with_ping()  # make sure the most recent block is synced
         node2.send_message(msg_block(self.blocks[101]))
-        assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101)
+        assert_equal(self.nodes[2].getblock(
+            self.nodes[2].getbestblockhash())['height'], 101)
 
 if __name__ == '__main__':
     SendHeadersTest().main()
diff --git a/qa/rpc-tests/bip65-cltv.py b/qa/rpc-tests/bip65-cltv.py
--- a/qa/rpc-tests/bip65-cltv.py
+++ b/qa/rpc-tests/bip65-cltv.py
@@ -10,7 +10,9 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class BIP65Test(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 3
@@ -19,8 +21,10 @@
     def setup_network(self):
         self.nodes = []
         self.nodes.append(start_node(0, self.options.tmpdir, []))
-        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=3"]))
-        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=4"]))
+        self.nodes.append(
+            start_node(1, self.options.tmpdir, ["-blockversion=3"]))
+        self.nodes.append(
+            start_node(2, self.options.tmpdir, ["-blockversion=4"]))
         connect_nodes(self.nodes[1], 0)
         connect_nodes(self.nodes[2], 0)
         self.is_network_split = False
@@ -64,7 +68,8 @@
         self.nodes[1].generate(1)
         self.sync_all()
         if (self.nodes[0].getblockcount() != cnt + 1050):
-            raise AssertionError("Failed to mine a version=3 block after 949 version=4 blocks")
+            raise AssertionError(
+                "Failed to mine a version=3 block after 949 version=4 blocks")
 
         # Mine 1 new-version blocks
         self.nodes[2].generate(1)
@@ -75,12 +80,14 @@
         # Mine 1 old-version blocks
         try:
             self.nodes[1].generate(1)
-            raise AssertionError("Succeeded to mine a version=3 block after 950 version=4 blocks")
+            raise AssertionError(
+                "Succeeded to mine a version=3 block after 950 version=4 blocks")
         except JSONRPCException:
             pass
         self.sync_all()
         if (self.nodes[0].getblockcount() != cnt + 1051):
-            raise AssertionError("Accepted a version=3 block after 950 version=4 blocks")
+            raise AssertionError(
+                "Accepted a version=3 block after 950 version=4 blocks")
 
         # Mine 1 new-version blocks
         self.nodes[2].generate(1)
diff --git a/qa/rpc-tests/bipdersig.py b/qa/rpc-tests/bipdersig.py
--- a/qa/rpc-tests/bipdersig.py
+++ b/qa/rpc-tests/bipdersig.py
@@ -10,7 +10,9 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class BIP66Test(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 3
@@ -19,8 +21,10 @@
     def setup_network(self):
         self.nodes = []
         self.nodes.append(start_node(0, self.options.tmpdir, []))
-        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
-        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
+        self.nodes.append(
+            start_node(1, self.options.tmpdir, ["-blockversion=2"]))
+        self.nodes.append(
+            start_node(2, self.options.tmpdir, ["-blockversion=3"]))
         connect_nodes(self.nodes[1], 0)
         connect_nodes(self.nodes[2], 0)
         self.is_network_split = False
@@ -63,7 +67,8 @@
         self.nodes[1].generate(1)
         self.sync_all()
         if (self.nodes[0].getblockcount() != cnt + 1050):
-            raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")
+            raise AssertionError(
+                "Failed to mine a version=2 block after 949 version=3 blocks")
 
         # Mine 1 new-version blocks
         self.nodes[2].generate(1)
@@ -74,12 +79,14 @@
         # Mine 1 old-version blocks
         try:
             self.nodes[1].generate(1)
-            raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks")
+            raise AssertionError(
+                "Succeeded to mine a version=2 block after 950 version=3 blocks")
         except JSONRPCException:
             pass
         self.sync_all()
         if (self.nodes[0].getblockcount() != cnt + 1051):
-            raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")
+            raise AssertionError(
+                "Accepted a version=2 block after 950 version=3 blocks")
 
         # Mine 1 new-version blocks
         self.nodes[2].generate(1)
diff --git a/qa/rpc-tests/blockchain.py b/qa/rpc-tests/blockchain.py
--- a/qa/rpc-tests/blockchain.py
+++ b/qa/rpc-tests/blockchain.py
@@ -23,6 +23,7 @@
 
 
 class BlockchainTest(BitcoinTestFramework):
+
     """
     Test blockchain-related RPC calls:
 
diff --git a/qa/rpc-tests/create_cache.py b/qa/rpc-tests/create_cache.py
--- a/qa/rpc-tests/create_cache.py
+++ b/qa/rpc-tests/create_cache.py
@@ -10,6 +10,7 @@
 
 from test_framework.test_framework import BitcoinTestFramework
 
+
 class CreateCache(BitcoinTestFramework):
 
     def __init__(self):
diff --git a/qa/rpc-tests/decodescript.py b/qa/rpc-tests/decodescript.py
--- a/qa/rpc-tests/decodescript.py
+++ b/qa/rpc-tests/decodescript.py
@@ -8,7 +8,9 @@
 from test_framework.mininode import *
 from io import BytesIO
 
+
 class DecodeScriptTest(BitcoinTestFramework):
+
     """Tests decoding scripts via RPC command "decodescript"."""
 
     def __init__(self):
@@ -29,18 +31,21 @@
         # below are test cases for all of the standard transaction types
 
         # 1) P2PK scriptSig
-        # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
+        # the scriptSig of a public key scriptPubKey simply pushes a signature
+        # onto the stack
         rpc_result = self.nodes[0].decodescript(push_signature)
         assert_equal(signature, rpc_result['asm'])
 
         # 2) P2PKH scriptSig
-        rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
+        rpc_result = self.nodes[0].decodescript(
+            push_signature + push_public_key)
         assert_equal(signature + ' ' + public_key, rpc_result['asm'])
 
         # 3) multisig scriptSig
         # this also tests the leading portion of a P2SH multisig scriptSig
         # OP_0 <A sig> <B sig>
-        rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
+        rpc_result = self.nodes[0].decodescript(
+            '00' + push_signature + push_signature)
         assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
 
         # 4) P2SH scriptSig
@@ -68,27 +73,35 @@
 
         # 2) P2PKH scriptPubKey
         # OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
-        rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
-        assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
+        rpc_result = self.nodes[0].decodescript(
+            '76a9' + push_public_key_hash + '88ac')
+        assert_equal('OP_DUP OP_HASH160 ' + public_key_hash +
+                     ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
 
         # 3) multisig scriptPubKey
         # <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
         # just imagine that the pub keys used below are different.
-        # for our purposes here it does not matter that they are the same even though it is unrealistic.
-        rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
-        assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key +  ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
+        # for our purposes here it does not matter that they are the same even
+        # though it is unrealistic.
+        rpc_result = self.nodes[0].decodescript(
+            '52' + push_public_key + push_public_key + push_public_key + '53ae')
+        assert_equal('2 ' + public_key + ' ' + public_key + ' ' +
+                     public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
 
         # 4) P2SH scriptPubKey
         # OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
         # push_public_key_hash here should actually be the hash of a redeem script.
         # but this works the same for purposes of this test.
-        rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
-        assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
+        rpc_result = self.nodes[0].decodescript(
+            'a9' + push_public_key_hash + '87')
+        assert_equal(
+            'OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
 
         # 5) null data scriptPubKey
         # use a signature look-alike here to make sure that we do not decode random data as a signature.
         # this matters if/when signature sighash decoding comes along.
-        # would want to make sure that no such decoding takes place in this case.
+        # would want to make sure that no such decoding takes place in this
+        # case.
         signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
         # OP_RETURN <data>
         rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
@@ -107,8 +120,10 @@
         # <sender-pubkey> OP_CHECKSIG
         #
         # lock until block 500,000
-        rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
-        assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
+        rpc_result = self.nodes[0].decodescript(
+            '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
+        assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' +
+                     public_key + ' OP_CHECKSIG', rpc_result['asm'])
 
     def decoderawtransaction_asm_sighashtype(self):
         """Tests decoding scripts via RPC command "decoderawtransaction".
@@ -116,38 +131,55 @@
         This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
         """
 
-        # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
+        # this test case uses a random plain vanilla mainnet transaction with a
+        # single P2PKH input and output
         tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
         rpc_result = self.nodes[0].decoderawtransaction(tx)
-        assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
+        assert_equal(
+            '304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
 
         # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
         # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
         # verify that we have not altered scriptPubKey decoding.
         tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
         rpc_result = self.nodes[0].decoderawtransaction(tx)
-        assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
-        assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
-        assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
-        assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
+        assert_equal(
+            '8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
+        assert_equal(
+            '0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
+        assert_equal(
+            'OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG',
+            rpc_result['vout'][0]['scriptPubKey']['asm'])
+        assert_equal(
+            'OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL',
+            rpc_result['vout'][1]['scriptPubKey']['asm'])
         txSave = CTransaction()
         txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
 
-        # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
+        # make sure that a specifically crafted op_return value will not pass
+        # all the IsDERSignature checks and then get decoded as a sighash type
         tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
         rpc_result = self.nodes[0].decoderawtransaction(tx)
-        assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
+        assert_equal('OP_RETURN 300602010002010001',
+                     rpc_result['vout'][0]['scriptPubKey']['asm'])
 
-        # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
+        # verify that we have not altered scriptPubKey processing even of a
+        # specially crafted P2PKH pubkeyhash and P2SH redeem script hash that
+        # is made to pass the der signature checks
         tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
         rpc_result = self.nodes[0].decoderawtransaction(tx)
-        assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
-        assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
+        assert_equal(
+            'OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG',
+            rpc_result['vout'][0]['scriptPubKey']['asm'])
+        assert_equal(
+            'OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL',
+            rpc_result['vout'][1]['scriptPubKey']['asm'])
 
         # some more full transaction tests of varying specific scriptSigs. used instead of
         # tests in decodescript_script_sig because the decodescript RPC is specifically
         # for working on scriptPubKeys (argh!).
-        push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
+        push_signature = bytes_to_hex_str(
+            txSave.vin[0].scriptSig)[2:(0x48 * 2 + 4)]
         signature = push_signature[2:]
         der_signature = signature[:-2]
         signature_sighash_decoded = der_signature + '[ALL]'
@@ -157,24 +189,36 @@
 
         # 1) P2PK scriptSig
         txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
-        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
-        assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
+        rpc_result = self.nodes[0].decoderawtransaction(
+            bytes_to_hex_str(txSave.serialize()))
+        assert_equal(signature_sighash_decoded,
+                     rpc_result['vin'][0]['scriptSig']['asm'])
 
-        # make sure that the sighash decodes come out correctly for a more complex / lesser used case.
+        # make sure that the sighash decodes come out correctly for a more
+        # complex / lesser used case.
         txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
-        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
-        assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
+        rpc_result = self.nodes[0].decoderawtransaction(
+            bytes_to_hex_str(txSave.serialize()))
+        assert_equal(signature_2_sighash_decoded,
+                     rpc_result['vin'][0]['scriptSig']['asm'])
 
         # 2) multisig scriptSig
-        txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
-        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
-        assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
+        txSave.vin[0].scriptSig = hex_str_to_bytes(
+            '00' + push_signature + push_signature_2)
+        rpc_result = self.nodes[0].decoderawtransaction(
+            bytes_to_hex_str(txSave.serialize()))
+        assert_equal('0 ' + signature_sighash_decoded + ' ' +
+                     signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
 
         # 3) test a scriptSig that contains more than push operations.
-        # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
-        txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
-        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
-        assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
+        # in fact, it contains an OP_RETURN with data specially crafted to
+        # cause improper decode if the code does not catch it.
+        txSave.vin[0].scriptSig = hex_str_to_bytes(
+            '6a143011020701010101010101020601010101010101')
+        rpc_result = self.nodes[0].decoderawtransaction(
+            bytes_to_hex_str(txSave.serialize()))
+        assert_equal('OP_RETURN 3011020701010101010101020601010101010101',
+                     rpc_result['vin'][0]['scriptSig']['asm'])
 
     def run_test(self):
         self.decodescript_script_sig()
diff --git a/qa/rpc-tests/disablewallet.py b/qa/rpc-tests/disablewallet.py
--- a/qa/rpc-tests/disablewallet.py
+++ b/qa/rpc-tests/disablewallet.py
@@ -19,12 +19,14 @@
         self.num_nodes = 1
 
     def setup_network(self, split=False):
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, [['-disablewallet']])
         self.is_network_split = False
         self.sync_all()
 
-    def run_test (self):
-        # Check regression: https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880
+    def run_test(self):
+        # Check regression:
+        # https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880
         x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
         assert(x['isvalid'] == False)
         x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
@@ -32,17 +34,20 @@
 
         # Checking mining to an address without a wallet
         try:
-            self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
+            self.nodes[0].generatetoaddress(
+                1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
         except JSONRPCException as e:
             assert("Invalid address" not in e.error['message'])
-            assert("ProcessNewBlock, block not accepted" not in e.error['message'])
+            assert(
+                "ProcessNewBlock, block not accepted" not in e.error['message'])
             assert("Couldn't create new block" not in e.error['message'])
 
         try:
-            self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
+            self.nodes[0].generatetoaddress(
+                1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
             raise AssertionError("Must not mine to invalid address!")
         except JSONRPCException as e:
             assert("Invalid address" in e.error['message'])
 
 if __name__ == '__main__':
-    DisableWalletTest ().main ()
+    DisableWalletTest().main()
diff --git a/qa/rpc-tests/forknotify.py b/qa/rpc-tests/forknotify.py
--- a/qa/rpc-tests/forknotify.py
+++ b/qa/rpc-tests/forknotify.py
@@ -4,12 +4,13 @@
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #
-# Test -alertnotify 
+# Test -alertnotify
 #
 
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class ForkNotifyTest(BitcoinTestFramework):
 
     def __init__(self):
@@ -25,10 +26,10 @@
         with open(self.alert_filename, 'w', encoding='utf8'):
             pass  # Just open then close to create zero-length file
         self.nodes.append(start_node(0, self.options.tmpdir,
-                            ["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
+                                     ["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
         # Node1 mines block.version=211 blocks
         self.nodes.append(start_node(1, self.options.tmpdir,
-                                ["-blockversion=211"]))
+                                     ["-blockversion=211"]))
         connect_nodes(self.nodes[1], 0)
 
         self.is_network_split = False
@@ -48,7 +49,8 @@
             alert_text = f.read()
 
         if len(alert_text) == 0:
-            raise AssertionError("-alertnotify did not warn of up-version blocks")
+            raise AssertionError(
+                "-alertnotify did not warn of up-version blocks")
 
         # Mine more up-version blocks, should not get more alerts:
         self.nodes[1].generate(1)
@@ -60,7 +62,8 @@
             alert_text2 = f.read()
 
         if alert_text != alert_text2:
-            raise AssertionError("-alertnotify excessive warning of up-version blocks")
+            raise AssertionError(
+                "-alertnotify excessive warning of up-version blocks")
 
 if __name__ == '__main__':
     ForkNotifyTest().main()
diff --git a/qa/rpc-tests/getblocktemplate_proposals.py b/qa/rpc-tests/getblocktemplate_proposals.py
--- a/qa/rpc-tests/getblocktemplate_proposals.py
+++ b/qa/rpc-tests/getblocktemplate_proposals.py
@@ -10,10 +10,14 @@
 from hashlib import sha256
 from struct import pack
 
+
 def b2x(b):
     return b2a_hex(b).decode('ascii')
 
-# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
+# NOTE: This does not work for signed numbers (set the high bit) or zero
+# (use b'\0')
+
+
 def encodeUNum(n):
     s = bytearray(b'\1')
     while n > 127:
@@ -23,6 +27,7 @@
     s.append(n)
     return bytes(s)
 
+
 def varlenEncode(n):
     if n < 0xfd:
         return pack('<B', n)
@@ -32,9 +37,11 @@
         return b'\xfe' + pack('<L', n)
     return b'\xff' + pack('<Q', n)
 
+
 def dblsha(b):
     return sha256(sha256(b).digest()).digest()
 
+
 def genmrklroot(leaflist):
     cur = leaflist
     while len(cur) > 1:
@@ -42,30 +49,37 @@
         if len(cur) & 1:
             cur.append(cur[-1])
         for i in range(0, len(cur), 2):
-            n.append(dblsha(cur[i] + cur[i+1]))
+            n.append(dblsha(cur[i] + cur[i + 1]))
         cur = n
     return cur[0]
 
+
 def template_to_bytearray(tmpl, txlist):
     blkver = pack('<L', tmpl['version'])
     mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
     timestamp = pack('<L', tmpl['curtime'])
     nonce = b'\0\0\0\0'
-    blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
+    blk = blkver + a2b_hex(tmpl['previousblockhash'])[
+        ::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
     blk += varlenEncode(len(txlist))
     for tx in txlist:
         blk += tx
     return bytearray(blk)
 
+
 def template_to_hex(tmpl, txlist):
     return b2x(template_to_bytearray(tmpl, txlist))
 
+
 def assert_template(node, tmpl, txlist, expect):
-    rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
+    rsp = node.getblocktemplate(
+        {'data': template_to_hex(tmpl, txlist), 'mode': 'proposal'})
     if rsp != expect:
         raise AssertionError('unexpected: %s' % (rsp,))
 
+
 class GetBlockTemplateProposalTest(BitcoinTestFramework):
+
     '''
     Test block proposals with getblocktemplate.
     '''
@@ -81,33 +95,36 @@
 
     def run_test(self):
         node = self.nodes[0]
-        node.generate(1) # Mine a block to leave initial block download
+        node.generate(1)  # Mine a block to leave initial block download
         tmpl = node.getblocktemplate()
         if 'coinbasetxn' not in tmpl:
             rawcoinbase = encodeUNum(tmpl['height'])
             rawcoinbase += b'\x01-'
             hexcoinbase = b2x(rawcoinbase)
             hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
-            tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
-        txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
+            tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + (
+                '%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
+        txlist = list(bytearray(a2b_hex(a['data']))
+                      for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
 
         # Test 0: Capability advertised
         assert('proposal' in tmpl['capabilities'])
 
         # NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
-        ## Test 1: Bad height in coinbase
-        #txlist[0][4+1+36+1+1] += 1
-        #assert_template(node, tmpl, txlist, 'FIXME')
-        #txlist[0][4+1+36+1+1] -= 1
+        # Test 1: Bad height in coinbase
+        # txlist[0][4+1+36+1+1] += 1
+        # assert_template(node, tmpl, txlist, 'FIXME')
+        # txlist[0][4+1+36+1+1] -= 1
 
         # Test 2: Bad input hash for gen tx
-        txlist[0][4+1] += 1
+        txlist[0][4 + 1] += 1
         assert_template(node, tmpl, txlist, 'bad-cb-missing')
-        txlist[0][4+1] -= 1
+        txlist[0][4 + 1] -= 1
 
         # Test 3: Truncated final tx
         lastbyte = txlist[-1].pop()
-        assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
+        assert_raises(
+            JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
         txlist[-1].append(lastbyte)
 
         # Test 4: Add an invalid tx to the end (duplicate of gen tx)
@@ -117,7 +134,7 @@
 
         # Test 5: Add an invalid tx to the end (non-duplicate)
         txlist.append(bytearray(txlist[0]))
-        txlist[-1][4+1] = 0xff
+        txlist[-1][4 + 1] = 0xff
         assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
         txlist.pop()
 
@@ -128,7 +145,8 @@
 
         # Test 7: Bad tx count
         txlist.append(b'')
-        assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
+        assert_raises(
+            JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
         txlist.pop()
 
         # Test 8: Bad bits
@@ -139,8 +157,8 @@
 
         # Test 9: Bad merkle root
         rawtmpl = template_to_bytearray(tmpl, txlist)
-        rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
-        rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
+        rawtmpl[4 + 32] = (rawtmpl[4 + 32] + 1) % 0x100
+        rsp = node.getblocktemplate({'data': b2x(rawtmpl), 'mode': 'proposal'})
         if rsp != 'bad-txnmrklroot':
             raise AssertionError('unexpected: %s' % (rsp,))
 
diff --git a/qa/rpc-tests/getchaintips.py b/qa/rpc-tests/getchaintips.py
--- a/qa/rpc-tests/getchaintips.py
+++ b/qa/rpc-tests/getchaintips.py
@@ -10,53 +10,55 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import assert_equal
 
+
 class GetChainTipsTest (BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 4
         self.setup_clean_chain = False
 
-    def run_test (self):
+    def run_test(self):
 
-        tips = self.nodes[0].getchaintips ()
-        assert_equal (len (tips), 1)
-        assert_equal (tips[0]['branchlen'], 0)
-        assert_equal (tips[0]['height'], 200)
-        assert_equal (tips[0]['status'], 'active')
+        tips = self.nodes[0].getchaintips()
+        assert_equal(len(tips), 1)
+        assert_equal(tips[0]['branchlen'], 0)
+        assert_equal(tips[0]['height'], 200)
+        assert_equal(tips[0]['status'], 'active')
 
         # Split the network and build two chains of different lengths.
-        self.split_network ()
+        self.split_network()
         self.nodes[0].generate(10)
         self.nodes[2].generate(20)
-        self.sync_all ()
+        self.sync_all()
 
-        tips = self.nodes[1].getchaintips ()
-        assert_equal (len (tips), 1)
+        tips = self.nodes[1].getchaintips()
+        assert_equal(len(tips), 1)
         shortTip = tips[0]
-        assert_equal (shortTip['branchlen'], 0)
-        assert_equal (shortTip['height'], 210)
-        assert_equal (tips[0]['status'], 'active')
+        assert_equal(shortTip['branchlen'], 0)
+        assert_equal(shortTip['height'], 210)
+        assert_equal(tips[0]['status'], 'active')
 
-        tips = self.nodes[3].getchaintips ()
-        assert_equal (len (tips), 1)
+        tips = self.nodes[3].getchaintips()
+        assert_equal(len(tips), 1)
         longTip = tips[0]
-        assert_equal (longTip['branchlen'], 0)
-        assert_equal (longTip['height'], 220)
-        assert_equal (tips[0]['status'], 'active')
+        assert_equal(longTip['branchlen'], 0)
+        assert_equal(longTip['height'], 220)
+        assert_equal(tips[0]['status'], 'active')
 
         # Join the network halves and check that we now have two tips
         # (at least at the nodes that previously had the short chain).
-        self.join_network ()
+        self.join_network()
 
-        tips = self.nodes[0].getchaintips ()
-        assert_equal (len (tips), 2)
-        assert_equal (tips[0], longTip)
+        tips = self.nodes[0].getchaintips()
+        assert_equal(len(tips), 2)
+        assert_equal(tips[0], longTip)
 
-        assert_equal (tips[1]['branchlen'], 10)
-        assert_equal (tips[1]['status'], 'valid-fork')
+        assert_equal(tips[1]['branchlen'], 10)
+        assert_equal(tips[1]['status'], 'valid-fork')
         tips[1]['branchlen'] = 0
         tips[1]['status'] = 'active'
-        assert_equal (tips[1], shortTip)
+        assert_equal(tips[1], shortTip)
 
 if __name__ == '__main__':
-    GetChainTipsTest ().main ()
+    GetChainTipsTest().main()
diff --git a/qa/rpc-tests/httpbasics.py b/qa/rpc-tests/httpbasics.py
--- a/qa/rpc-tests/httpbasics.py
+++ b/qa/rpc-tests/httpbasics.py
@@ -13,7 +13,9 @@
 import http.client
 import urllib.parse
 
+
 class HTTPBasicsTest (BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 3
@@ -24,9 +26,9 @@
 
     def run_test(self):
 
-        #################################################
+        #
         # lowlevel check for http persistent connection #
-        #################################################
+        #
         url = urllib.parse.urlparse(self.nodes[0].url)
         authpair = url.username + ':' + url.password
         headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
@@ -36,43 +38,53 @@
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         out1 = conn.getresponse().read()
         assert(b'"error":null' in out1)
-        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
+        assert(conn.sock != None)
+               # according to http/1.1 connection must still be open!
 
-        #send 2nd request without closing connection
+        # send 2nd request without closing connection
         conn.request('POST', '/', '{"method": "getchaintips"}', headers)
         out1 = conn.getresponse().read()
-        assert(b'"error":null' in out1) #must also response with a correct json-rpc message
-        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
+        assert(b'"error":null' in out1)
+               # must also respond with a correct JSON-RPC message
+        assert(conn.sock != None)
+               # according to http/1.1 connection must still be open!
         conn.close()
 
-        #same should be if we add keep-alive because this should be the std. behaviour
-        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
+        # the same should hold if we add keep-alive, because that is the
+        # standard behaviour
+        headers = {"Authorization": "Basic " +
+                   str_to_b64str(authpair), "Connection": "keep-alive"}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         out1 = conn.getresponse().read()
         assert(b'"error":null' in out1)
-        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
+        assert(conn.sock != None)
+               # according to http/1.1 connection must still be open!
 
-        #send 2nd request without closing connection
+        # send 2nd request without closing connection
         conn.request('POST', '/', '{"method": "getchaintips"}', headers)
         out1 = conn.getresponse().read()
-        assert(b'"error":null' in out1) #must also response with a correct json-rpc message
-        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
+        assert(b'"error":null' in out1)
+               # must also respond with a correct JSON-RPC message
+        assert(conn.sock != None)
+               # according to http/1.1 connection must still be open!
         conn.close()
 
-        #now do the same with "Connection: close"
-        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
+        # now do the same with "Connection: close"
+        headers = {"Authorization": "Basic " +
+                   str_to_b64str(authpair), "Connection": "close"}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         out1 = conn.getresponse().read()
         assert(b'"error":null' in out1)
-        assert(conn.sock==None) #now the connection must be closed after the response
+        assert(conn.sock == None)
+               # now the connection must be closed after the response
 
-        #node1 (2nd node) is running with disabled keep-alive option
+        # node1 (2nd node) is running with disabled keep-alive option
         urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
         authpair = urlNode1.username + ':' + urlNode1.password
         headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
@@ -83,7 +95,8 @@
         out1 = conn.getresponse().read()
         assert(b'"error":null' in out1)
 
-        #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
+        # node2 (third node) is running with standard keep-alive parameters
+        # which means keep-alive is on
         urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
         authpair = urlNode2.username + ':' + urlNode2.password
         headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
@@ -93,21 +106,23 @@
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         out1 = conn.getresponse().read()
         assert(b'"error":null' in out1)
-        assert(conn.sock!=None) #connection must be closed because bitcoind should use keep-alive by default
+        assert(conn.sock != None)
+               # connection must be kept open because bitcoind should use
+               # keep-alive by default
 
         # Check excessive request size
         conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
         conn.connect()
-        conn.request('GET', '/' + ('x'*1000), '', headers)
+        conn.request('GET', '/' + ('x' * 1000), '', headers)
         out1 = conn.getresponse()
         assert_equal(out1.status, http.client.NOT_FOUND)
 
         conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
         conn.connect()
-        conn.request('GET', '/' + ('x'*10000), '', headers)
+        conn.request('GET', '/' + ('x' * 10000), '', headers)
         out1 = conn.getresponse()
         assert_equal(out1.status, http.client.BAD_REQUEST)
 
 
 if __name__ == '__main__':
-    HTTPBasicsTest ().main ()
+    HTTPBasicsTest().main()
diff --git a/qa/rpc-tests/importmulti.py b/qa/rpc-tests/importmulti.py
--- a/qa/rpc-tests/importmulti.py
+++ b/qa/rpc-tests/importmulti.py
@@ -6,7 +6,9 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class ImportMultiTest (BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 2
@@ -14,13 +16,14 @@
 
     def setup_network(self, split=False):
         self.nodes = start_nodes(2, self.options.tmpdir)
-        self.is_network_split=False
+        self.is_network_split = False
 
-    def run_test (self):
+    def run_test(self):
         print ("Mining blocks...")
         self.nodes[0].generate(1)
         self.nodes[1].generate(1)
-        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+        timestamp = self.nodes[1].getblock(
+            self.nodes[1].getbestblockhash())['mediantime']
 
         # keyword definition
         PRIV_KEY = 'privkey'
@@ -28,23 +31,24 @@
         ADDRESS_KEY = 'address'
         SCRIPT_KEY = 'script'
 
+        node0_address1 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        node0_address2 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        node0_address3 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
 
-        node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-
-        #Check only one address
+        # Check only one address
         assert_equal(node0_address1['ismine'], True)
 
-        #Node 1 sync test
-        assert_equal(self.nodes[1].getblockcount(),1)
+        # Node 1 sync test
+        assert_equal(self.nodes[1].getblockcount(), 1)
 
-        #Address Test - before import
+        # Address Test - before import
         address_info = self.nodes[1].validateaddress(node0_address1['address'])
         assert_equal(address_info['iswatchonly'], False)
         assert_equal(address_info['ismine'], False)
 
-
         # RPC importmulti -----------------------------------------------
 
         # Bitcoin Address
@@ -98,13 +102,13 @@
         }])
         assert_equal(result[0]['success'], False)
         assert_equal(result[0]['error']['code'], -8)
-        assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
+        assert_equal(result[0]['error']['message'],
+                     'Internal must be set for hex scriptPubKey')
         address_assert = self.nodes[1].validateaddress(address['address'])
         assert_equal(address_assert['iswatchonly'], False)
         assert_equal(address_assert['ismine'], False)
         assert_equal('timestamp' in address_assert, False)
 
-
         # Address + Public key + !Internal
         print("Should import an address with public key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
@@ -113,7 +117,7 @@
                 "address": address['address']
             },
             "timestamp": "now",
-            "pubkeys": [ address['pubkey'] ]
+            "pubkeys": [address['pubkey']]
         }])
         assert_equal(result[0]['success'], True)
         address_assert = self.nodes[1].validateaddress(address['address'])
@@ -121,14 +125,13 @@
         assert_equal(address_assert['ismine'], False)
         assert_equal(address_assert['timestamp'], timestamp)
 
-
         # ScriptPubKey + Public key + internal
         print("Should import a scriptPubKey with internal and with public key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         request = [{
             "scriptPubKey": address['scriptPubKey'],
             "timestamp": "now",
-            "pubkeys": [ address['pubkey'] ],
+            "pubkeys": [address['pubkey']],
             "internal": True
         }]
         result = self.nodes[1].importmulti(request)
@@ -139,17 +142,19 @@
         assert_equal(address_assert['timestamp'], timestamp)
 
         # ScriptPubKey + Public key + !internal
-        print("Should not import a scriptPubKey without internal and with public key")
+        print(
+            "Should not import a scriptPubKey without internal and with public key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         request = [{
             "scriptPubKey": address['scriptPubKey'],
             "timestamp": "now",
-            "pubkeys": [ address['pubkey'] ]
+            "pubkeys": [address['pubkey']]
         }]
         result = self.nodes[1].importmulti(request)
         assert_equal(result[0]['success'], False)
         assert_equal(result[0]['error']['code'], -8)
-        assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
+        assert_equal(result[0]['error']['message'],
+                     'Internal must be set for hex scriptPubKey')
         address_assert = self.nodes[1].validateaddress(address['address'])
         assert_equal(address_assert['iswatchonly'], False)
         assert_equal(address_assert['ismine'], False)
@@ -163,7 +168,7 @@
                 "address": address['address']
             },
             "timestamp": "now",
-            "keys": [ self.nodes[0].dumpprivkey(address['address']) ]
+            "keys": [self.nodes[0].dumpprivkey(address['address'])]
         }])
         assert_equal(result[0]['success'], True)
         address_assert = self.nodes[1].validateaddress(address['address'])
@@ -172,31 +177,34 @@
         assert_equal(address_assert['timestamp'], timestamp)
 
         # Address + Private key + watchonly
-        print("Should not import an address with private key and with watchonly")
+        print(
+            "Should not import an address with private key and with watchonly")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         result = self.nodes[1].importmulti([{
             "scriptPubKey": {
                 "address": address['address']
             },
             "timestamp": "now",
-            "keys": [ self.nodes[0].dumpprivkey(address['address']) ],
+            "keys": [self.nodes[0].dumpprivkey(address['address'])],
             "watchonly": True
         }])
         assert_equal(result[0]['success'], False)
         assert_equal(result[0]['error']['code'], -8)
-        assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
+        assert_equal(result[0]['error']['message'],
+                     'Incompatibility found between watchonly and keys')
         address_assert = self.nodes[1].validateaddress(address['address'])
         assert_equal(address_assert['iswatchonly'], False)
         assert_equal(address_assert['ismine'], False)
         assert_equal('timestamp' in address_assert, False)
 
         # ScriptPubKey + Private key + internal
-        print("Should import a scriptPubKey with internal and with private key")
+        print(
+            "Should import a scriptPubKey with internal and with private key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         result = self.nodes[1].importmulti([{
             "scriptPubKey": address['scriptPubKey'],
             "timestamp": "now",
-            "keys": [ self.nodes[0].dumpprivkey(address['address']) ],
+            "keys": [self.nodes[0].dumpprivkey(address['address'])],
             "internal": True
         }])
         assert_equal(result[0]['success'], True)
@@ -206,31 +214,38 @@
         assert_equal(address_assert['timestamp'], timestamp)
 
         # ScriptPubKey + Private key + !internal
-        print("Should not import a scriptPubKey without internal and with private key")
+        print(
+            "Should not import a scriptPubKey without internal and with private key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         result = self.nodes[1].importmulti([{
             "scriptPubKey": address['scriptPubKey'],
             "timestamp": "now",
-            "keys": [ self.nodes[0].dumpprivkey(address['address']) ]
+            "keys": [self.nodes[0].dumpprivkey(address['address'])]
         }])
         assert_equal(result[0]['success'], False)
         assert_equal(result[0]['error']['code'], -8)
-        assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
+        assert_equal(result[0]['error']['message'],
+                     'Internal must be set for hex scriptPubKey')
         address_assert = self.nodes[1].validateaddress(address['address'])
         assert_equal(address_assert['iswatchonly'], False)
         assert_equal(address_assert['ismine'], False)
         assert_equal('timestamp' in address_assert, False)
 
-
         # P2SH address
-        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+        sig_address_1 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_2 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_3 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        multi_sig_script = self.nodes[0].createmultisig(
+            2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        transactionid = self.nodes[1].sendtoaddress(
+            multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
-        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+        timestamp = self.nodes[1].getblock(
+            self.nodes[1].getbestblockhash())['mediantime']
         transaction = self.nodes[1].gettransaction(transactionid)
 
         print("Should import a p2sh")
@@ -241,24 +256,31 @@
             "timestamp": "now",
         }])
         assert_equal(result[0]['success'], True)
-        address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
+        address_assert = self.nodes[1].validateaddress(
+            multi_sig_script['address'])
         assert_equal(address_assert['isscript'], True)
         assert_equal(address_assert['iswatchonly'], True)
         assert_equal(address_assert['timestamp'], timestamp)
-        p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
+        p2shunspent = self.nodes[1].listunspent(
+            0, 999999, [multi_sig_script['address']])[0]
         assert_equal(p2shunspent['spendable'], False)
         assert_equal(p2shunspent['solvable'], False)
 
-
         # P2SH + Redeem script
-        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+        sig_address_1 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_2 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_3 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        multi_sig_script = self.nodes[0].createmultisig(
+            2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        transactionid = self.nodes[1].sendtoaddress(
+            multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
-        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+        timestamp = self.nodes[1].getblock(
+            self.nodes[1].getbestblockhash())['mediantime']
         transaction = self.nodes[1].gettransaction(transactionid)
 
         print("Should import a p2sh with respective redeem script")
@@ -270,66 +292,82 @@
             "redeemscript": multi_sig_script['redeemScript']
         }])
         assert_equal(result[0]['success'], True)
-        address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
+        address_assert = self.nodes[1].validateaddress(
+            multi_sig_script['address'])
         assert_equal(address_assert['timestamp'], timestamp)
 
-        p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
+        p2shunspent = self.nodes[1].listunspent(
+            0, 999999, [multi_sig_script['address']])[0]
         assert_equal(p2shunspent['spendable'], False)
         assert_equal(p2shunspent['solvable'], True)
 
-
         # P2SH + Redeem script + Private Keys + !Watchonly
-        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+        sig_address_1 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_2 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_3 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        multi_sig_script = self.nodes[0].createmultisig(
+            2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        transactionid = self.nodes[1].sendtoaddress(
+            multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
-        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+        timestamp = self.nodes[1].getblock(
+            self.nodes[1].getbestblockhash())['mediantime']
         transaction = self.nodes[1].gettransaction(transactionid)
 
-        print("Should import a p2sh with respective redeem script and private keys")
+        print(
+            "Should import a p2sh with respective redeem script and private keys")
         result = self.nodes[1].importmulti([{
             "scriptPubKey": {
                 "address": multi_sig_script['address']
             },
             "timestamp": "now",
             "redeemscript": multi_sig_script['redeemScript'],
-            "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
+            "keys": [self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
         }])
         assert_equal(result[0]['success'], True)
-        address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
+        address_assert = self.nodes[1].validateaddress(
+            multi_sig_script['address'])
         assert_equal(address_assert['timestamp'], timestamp)
 
-        p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
+        p2shunspent = self.nodes[1].listunspent(
+            0, 999999, [multi_sig_script['address']])[0]
         assert_equal(p2shunspent['spendable'], False)
         assert_equal(p2shunspent['solvable'], True)
 
         # P2SH + Redeem script + Private Keys + Watchonly
-        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
-        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+        sig_address_1 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_2 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        sig_address_3 = self.nodes[0].validateaddress(
+            self.nodes[0].getnewaddress())
+        multi_sig_script = self.nodes[0].createmultisig(
+            2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        transactionid = self.nodes[1].sendtoaddress(
+            multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
         transaction = self.nodes[1].gettransaction(transactionid)
 
-        print("Should import a p2sh with respective redeem script and private keys")
+        print(
+            "Should import a p2sh with respective redeem script and private keys")
         result = self.nodes[1].importmulti([{
             "scriptPubKey": {
                 "address": multi_sig_script['address']
             },
             "timestamp": "now",
             "redeemscript": multi_sig_script['redeemScript'],
-            "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
+            "keys": [self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
             "watchonly": True
         }])
         assert_equal(result[0]['success'], False)
         assert_equal(result[0]['error']['code'], -8)
-        assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
-
+        assert_equal(result[0]['error']['message'],
+                     'Incompatibility found between watchonly and keys')
 
         # Address + Public key + !Internal + Wrong pubkey
         print("Should not import an address with a wrong public key")
@@ -340,7 +378,7 @@
                 "address": address['address']
             },
             "timestamp": "now",
-            "pubkeys": [ address2['pubkey'] ]
+            "pubkeys": [address2['pubkey']]
         }])
         assert_equal(result[0]['success'], False)
         assert_equal(result[0]['error']['code'], -5)
@@ -350,15 +388,15 @@
         assert_equal(address_assert['ismine'], False)
         assert_equal('timestamp' in address_assert, False)
 
-
         # ScriptPubKey + Public key + internal + Wrong pubkey
-        print("Should not import a scriptPubKey with internal and with a wrong public key")
+        print(
+            "Should not import a scriptPubKey with internal and with a wrong public key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         request = [{
             "scriptPubKey": address['scriptPubKey'],
             "timestamp": "now",
-            "pubkeys": [ address2['pubkey'] ],
+            "pubkeys": [address2['pubkey']],
             "internal": True
         }]
         result = self.nodes[1].importmulti(request)
@@ -370,7 +408,6 @@
         assert_equal(address_assert['ismine'], False)
         assert_equal('timestamp' in address_assert, False)
 
-
         # Address + Private key + !watchonly + Wrong private key
         print("Should not import an address with a wrong private key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
@@ -380,7 +417,7 @@
                 "address": address['address']
             },
             "timestamp": "now",
-            "keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
+            "keys": [self.nodes[0].dumpprivkey(address2['address'])]
         }])
         assert_equal(result[0]['success'], False)
         assert_equal(result[0]['error']['code'], -5)
@@ -390,15 +427,15 @@
         assert_equal(address_assert['ismine'], False)
         assert_equal('timestamp' in address_assert, False)
 
-
         # ScriptPubKey + Private key + internal + Wrong private key
-        print("Should not import a scriptPubKey with internal and with a wrong private key")
+        print(
+            "Should not import a scriptPubKey with internal and with a wrong private key")
         address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
         result = self.nodes[1].importmulti([{
             "scriptPubKey": address['scriptPubKey'],
             "timestamp": "now",
-            "keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
+            "keys": [self.nodes[0].dumpprivkey(address2['address'])],
             "internal": True
         }])
         assert_equal(result[0]['success'], False)
@@ -409,21 +446,24 @@
         assert_equal(address_assert['ismine'], False)
         assert_equal('timestamp' in address_assert, False)
 
-        # restart nodes to check for proper serialization/deserialization of watch only address
+        # restart nodes to check for proper serialization/deserialization of
+        # watch only address
         stop_nodes(self.nodes)
         self.nodes = start_nodes(2, self.options.tmpdir)
         address_assert = self.nodes[1].validateaddress(watchonly_address)
         assert_equal(address_assert['iswatchonly'], True)
         assert_equal(address_assert['ismine'], False)
-        assert_equal(address_assert['timestamp'], watchonly_timestamp);
+        assert_equal(address_assert['timestamp'], watchonly_timestamp)
 
         # Bad or missing timestamps
         print("Should throw on invalid or missing timestamp values")
-        assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
+        assert_raises_message(
+            JSONRPCException, 'Missing required timestamp field for key',
             self.nodes[1].importmulti, [{
                 "scriptPubKey": address['scriptPubKey'],
             }])
-        assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
+        assert_raises_message(
+            JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
             self.nodes[1].importmulti, [{
                 "scriptPubKey": address['scriptPubKey'],
                 "timestamp": "",
@@ -431,4 +471,4 @@
 
 
 if __name__ == '__main__':
-    ImportMultiTest ().main ()
+    ImportMultiTest().main()
diff --git a/qa/rpc-tests/importprunedfunds.py b/qa/rpc-tests/importprunedfunds.py
--- a/qa/rpc-tests/importprunedfunds.py
+++ b/qa/rpc-tests/importprunedfunds.py
@@ -16,8 +16,8 @@
 
     def setup_network(self, split=False):
         self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
-        connect_nodes_bi(self.nodes,0,1)
-        self.is_network_split=False
+        connect_nodes_bi(self.nodes, 0, 1)
+        self.is_network_split = False
         self.sync_all()
 
     def run_test(self):
@@ -25,26 +25,28 @@
         self.nodes[0].generate(101)
 
         self.sync_all()
-        
+
         # address
         address1 = self.nodes[0].getnewaddress()
         # pubkey
         address2 = self.nodes[0].getnewaddress()
-        address2_pubkey = self.nodes[0].validateaddress(address2)['pubkey']                 # Using pubkey
+        address2_pubkey = self.nodes[0].validateaddress(
+            address2)['pubkey']                 # Using pubkey
         # privkey
         address3 = self.nodes[0].getnewaddress()
-        address3_privkey = self.nodes[0].dumpprivkey(address3)                              # Using privkey
+        address3_privkey = self.nodes[0].dumpprivkey(
+            address3)                              # Using privkey
 
-        #Check only one address
+        # Check only one address
         address_info = self.nodes[0].validateaddress(address1)
         assert_equal(address_info['ismine'], True)
 
         self.sync_all()
 
-        #Node 1 sync test
-        assert_equal(self.nodes[1].getblockcount(),101)
+        # Node 1 sync test
+        assert_equal(self.nodes[1].getblockcount(), 101)
 
-        #Address Test - before import
+        # Address Test - before import
         address_info = self.nodes[1].validateaddress(address1)
         assert_equal(address_info['iswatchonly'], False)
         assert_equal(address_info['ismine'], False)
@@ -57,7 +59,7 @@
         assert_equal(address_info['iswatchonly'], False)
         assert_equal(address_info['ismine'], False)
 
-        #Send funds to self
+        # Send funds to self
         txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
         self.nodes[0].generate(1)
         rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
@@ -75,7 +77,7 @@
 
         self.sync_all()
 
-        #Import with no affiliated address
+        # Import with no affiliated address
         try:
             self.nodes[1].importprunedfunds(rawtxn1, proof1)
         except JSONRPCException as e:
@@ -86,13 +88,13 @@
         balance1 = self.nodes[1].getbalance("", 0, True)
         assert_equal(balance1, Decimal(0))
 
-        #Import with affiliated address with no rescan
+        # Import with affiliated address with no rescan
         self.nodes[1].importaddress(address2, "add2", False)
         result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2)
         balance2 = self.nodes[1].getbalance("add2", 0, True)
         assert_equal(balance2, Decimal('0.05'))
 
-        #Import with private key with no rescan
+        # Import with private key with no rescan
         self.nodes[1].importprivkey(address3_privkey, "add3", False)
         result3 = self.nodes[1].importprunedfunds(rawtxn3, proof3)
         balance3 = self.nodes[1].getbalance("add3", 0, False)
@@ -100,7 +102,7 @@
         balance3 = self.nodes[1].getbalance("*", 0, True)
         assert_equal(balance3, Decimal('0.075'))
 
-        #Addresses Test - after import
+        # Addresses Test - after import
         address_info = self.nodes[1].validateaddress(address1)
         assert_equal(address_info['iswatchonly'], False)
         assert_equal(address_info['ismine'], False)
@@ -111,7 +113,7 @@
         assert_equal(address_info['iswatchonly'], False)
         assert_equal(address_info['ismine'], True)
 
-        #Remove transactions
+        # Remove transactions
         try:
             self.nodes[1].removeprunedfunds(txnid1)
         except JSONRPCException as e:
diff --git a/qa/rpc-tests/invalidateblock.py b/qa/rpc-tests/invalidateblock.py
--- a/qa/rpc-tests/invalidateblock.py
+++ b/qa/rpc-tests/invalidateblock.py
@@ -10,9 +10,9 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class InvalidateTest(BitcoinTestFramework):
-    
-        
+
     def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
@@ -20,13 +20,14 @@
 
     def setup_network(self):
         self.nodes = []
-        self.is_network_split = False 
+        self.is_network_split = False
         self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
         self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
         self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
-        
+
     def run_test(self):
-        print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
+        print(
+            "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
         print("Mine 4 blocks on Node 0")
         self.nodes[0].generate(4)
         assert(self.nodes[0].getblockcount() == 4)
@@ -37,20 +38,22 @@
         assert(self.nodes[1].getblockcount() == 6)
 
         print("Connect nodes to force a reorg")
-        connect_nodes_bi(self.nodes,0,1)
+        connect_nodes_bi(self.nodes, 0, 1)
         sync_blocks(self.nodes[0:2])
         assert(self.nodes[0].getblockcount() == 6)
         badhash = self.nodes[1].getblockhash(2)
 
-        print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
+        print(
+            "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
         self.nodes[0].invalidateblock(badhash)
         newheight = self.nodes[0].getblockcount()
         newhash = self.nodes[0].getbestblockhash()
         if (newheight != 4 or newhash != besthash):
-            raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
+            raise AssertionError(
+                "Wrong tip for node0, hash %s, height %d" % (newhash, newheight))
 
         print("\nMake sure we won't reorg to a lower work chain:")
-        connect_nodes_bi(self.nodes,1,2)
+        connect_nodes_bi(self.nodes, 1, 2)
         print("Sync node 2 to node 1 so both have 6 blocks")
         sync_blocks(self.nodes[1:3])
         assert(self.nodes[2].getblockcount() == 6)
@@ -65,12 +68,13 @@
         print("Verify all nodes are at the right height")
         time.sleep(5)
         for i in range(3):
-            print(i,self.nodes[i].getblockcount())
+            print(i, self.nodes[i].getblockcount())
         assert(self.nodes[2].getblockcount() == 3)
         assert(self.nodes[0].getblockcount() == 4)
         node1height = self.nodes[1].getblockcount()
         if node1height < 4:
-            raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
+            raise AssertionError(
+                "Node 1 reorged to a lower height: %d" % node1height)
 
 if __name__ == '__main__':
     InvalidateTest().main()
diff --git a/qa/rpc-tests/invalidblockrequest.py b/qa/rpc-tests/invalidblockrequest.py
--- a/qa/rpc-tests/invalidblockrequest.py
+++ b/qa/rpc-tests/invalidblockrequest.py
@@ -20,10 +20,13 @@
 '''
 
 # Use the ComparisonTestFramework with 1 node: only use --testbinary.
+
+
 class InvalidBlockRequestTest(ComparisonTestFramework):
 
-    ''' Can either run this test as 1 node with expected answers, or two and compare them. 
+    ''' Can either run this test as 1 node with expected answers, or two and compare them.
         Change the "outcome" variable from each TestInstance object to only do the comparison. '''
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 1
@@ -33,19 +36,20 @@
         test.add_all_connections(self.nodes)
         self.tip = None
         self.block_time = None
-        NetworkThread().start() # Start up network handling in another thread
+        NetworkThread().start()  # Start up network handling in another thread
         test.run()
 
     def get_tests(self):
         if self.tip is None:
             self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
-        self.block_time = int(time.time())+1
+        self.block_time = int(time.time()) + 1
 
         '''
         Create a new block with an anyone-can-spend coinbase
         '''
         height = 1
-        block = create_block(self.tip, create_coinbase(height), self.block_time)
+        block = create_block(
+            self.tip, create_coinbase(height), self.block_time)
         self.block_time += 1
         block.solve()
         # Save the coinbase for later
@@ -59,7 +63,8 @@
         '''
         test = TestInstance(sync_every_block=False)
         for i in range(100):
-            block = create_block(self.tip, create_coinbase(height), self.block_time)
+            block = create_block(
+                self.tip, create_coinbase(height), self.block_time)
             block.solve()
             self.tip = block.sha256
             self.block_time += 1
@@ -71,10 +76,11 @@
         Now we use merkle-root malleability to generate an invalid block with
         same blockheader.
         Manufacture a block with 3 transactions (coinbase, spend of prior
-        coinbase, spend of that spend).  Duplicate the 3rd transaction to 
+        coinbase, spend of that spend).  Duplicate the 3rd transaction to
         leave merkle root and blockheader unchanged but invalidate the block.
         '''
-        block2 = create_block(self.tip, create_coinbase(height), self.block_time)
+        block2 = create_block(
+            self.tip, create_coinbase(height), self.block_time)
         self.block_time += 1
 
         # b'0x51' is OP_TRUE
@@ -101,10 +107,11 @@
         '''
         Make sure that a totally screwed up block is not valid.
         '''
-        block3 = create_block(self.tip, create_coinbase(height), self.block_time)
+        block3 = create_block(
+            self.tip, create_coinbase(height), self.block_time)
         self.block_time += 1
-        block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
-        block3.vtx[0].sha256=None
+        block3.vtx[0].vout[0].nValue = 100 * COIN  # Too high!
+        block3.vtx[0].sha256 = None
         block3.vtx[0].calc_sha256()
         block3.hashMerkleRoot = block3.calc_merkle_root()
         block3.rehash()
diff --git a/qa/rpc-tests/keypool.py b/qa/rpc-tests/keypool.py
--- a/qa/rpc-tests/keypool.py
+++ b/qa/rpc-tests/keypool.py
@@ -8,15 +8,18 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class KeyPoolTest(BitcoinTestFramework):
 
     def run_test(self):
         nodes = self.nodes
         addr_before_encrypting = nodes[0].getnewaddress()
-        addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
+        addr_before_encrypting_data = nodes[
+            0].validateaddress(addr_before_encrypting)
         wallet_info_old = nodes[0].getwalletinfo()
-        assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
-        
+        assert(addr_before_encrypting_data[
+               'hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
+
         # Encrypt wallet and wait to terminate
         nodes[0].encryptwallet('test')
         bitcoind_processes[0].wait()
@@ -26,14 +29,16 @@
         addr = nodes[0].getnewaddress()
         addr_data = nodes[0].validateaddress(addr)
         wallet_info = nodes[0].getwalletinfo()
-        assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
+        assert(addr_before_encrypting_data[
+               'hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
         assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
-        
+
         try:
             addr = nodes[0].getnewaddress()
-            raise AssertionError('Keypool should be exhausted after one address')
+            raise AssertionError(
+                'Keypool should be exhausted after one address')
         except JSONRPCException as e:
-            assert(e.error['code']==-12)
+            assert(e.error['code'] == -12)
 
         # put three new keys in the keypool
         nodes[0].walletpassphrase('test', 12000)
@@ -51,9 +56,10 @@
         # the next one should fail
         try:
             addr = nodes[0].getrawchangeaddress()
-            raise AssertionError('Keypool should be exhausted after three addresses')
+            raise AssertionError(
+                'Keypool should be exhausted after three addresses')
         except JSONRPCException as e:
-            assert(e.error['code']==-12)
+            assert(e.error['code'] == -12)
 
         # refill keypool with three new addresses
         nodes[0].walletpassphrase('test', 1)
@@ -69,9 +75,10 @@
         nodes[0].generate(1)
         try:
             nodes[0].generate(1)
-            raise AssertionError('Keypool should be exhausted after three addesses')
+            raise AssertionError(
+                'Keypool should be exhausted after three addesses')
         except JSONRPCException as e:
-            assert(e.error['code']==-12)
+            assert(e.error['code'] == -12)
 
     def __init__(self):
         super().__init__()
diff --git a/qa/rpc-tests/listsinceblock.py b/qa/rpc-tests/listsinceblock.py
--- a/qa/rpc-tests/listsinceblock.py
+++ b/qa/rpc-tests/listsinceblock.py
@@ -6,6 +6,7 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import assert_equal
 
+
 class ListSinceBlockTest (BitcoinTestFramework):
 
     def __init__(self):
@@ -13,7 +14,7 @@
         self.setup_clean_chain = True
         self.num_nodes = 4
 
-    def run_test (self):
+    def run_test(self):
         '''
         `listsinceblock` did not behave correctly when handed a block that was
         no longer in the main chain:
@@ -67,7 +68,8 @@
 
         self.join_network()
 
-        # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
+        # listsinceblock(lastblockhash) should now include tx, as seen from
+        # nodes[0]
         lsbres = self.nodes[0].listsinceblock(lastblockhash)
         found = False
         for tx in lsbres['transactions']:
diff --git a/qa/rpc-tests/maxblocksinflight.py b/qa/rpc-tests/maxblocksinflight.py
--- a/qa/rpc-tests/maxblocksinflight.py
+++ b/qa/rpc-tests/maxblocksinflight.py
@@ -16,8 +16,10 @@
 '''
 MAX_REQUESTS = 128
 
+
 class TestManager(NodeConnCB):
     # set up NodeConnCB callbacks, overriding base class
+
     def on_getdata(self, conn, message):
         self.log.debug("got getdata %s" % repr(message))
         # Log the requests
@@ -61,16 +63,20 @@
                 for key in self.blockReqCounts:
                     total_requests += self.blockReqCounts[key]
                     if self.blockReqCounts[key] > 1:
-                        raise AssertionError("Error, test failed: block %064x requested more than once" % key)
+                        raise AssertionError(
+                            "Error, test failed: block %064x requested more than once" % key)
             if total_requests > MAX_REQUESTS:
-                raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
-            print("Round %d: success (total requests: %d)" % (count, total_requests))
+                raise AssertionError(
+                    "Error, too many blocks (%d) requested" % total_requests)
+            print("Round %d: success (total requests: %d)" %
+                  (count, total_requests))
 
         self.disconnectOkay = True
         self.connection.disconnect_node()
 
 
 class MaxBlocksInFlightTest(BitcoinTestFramework):
+
     def add_options(self, parser):
         parser.add_option("--testbinary", dest="testbinary",
                           default=os.getenv("BITCOIND", "bitcoind"),
@@ -83,12 +89,14 @@
 
     def setup_network(self):
         self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
-                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
+                                 extra_args=[
+                                     ['-debug', '-whitelist=127.0.0.1']],
                                  binary=[self.options.testbinary])
 
     def run_test(self):
         test = TestManager()
-        test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
+        test.add_new_connection(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
         NetworkThread().start()  # Start up network handling in another thread
         test.run()
 
diff --git a/qa/rpc-tests/multi_rpc.py b/qa/rpc-tests/multi_rpc.py
--- a/qa/rpc-tests/multi_rpc.py
+++ b/qa/rpc-tests/multi_rpc.py
@@ -14,6 +14,7 @@
 import http.client
 import urllib.parse
 
+
 class HTTPBasicsTest (BitcoinTestFramework):
 
     def __init__(self):
@@ -23,34 +24,34 @@
 
     def setup_chain(self):
         super().setup_chain()
-        #Append rpcauth to bitcoin.conf before initialization
+        # Append rpcauth to bitcoin.conf before initialization
         rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
         rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
-        with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
-            f.write(rpcauth+"\n")
-            f.write(rpcauth2+"\n")
+        with open(os.path.join(self.options.tmpdir + "/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
+            f.write(rpcauth + "\n")
+            f.write(rpcauth2 + "\n")
 
     def setup_network(self):
         self.nodes = self.setup_nodes()
 
     def run_test(self):
 
-        ##################################################
+        #
         # Check correctness of the rpcauth config option #
-        ##################################################
+        #
         url = urllib.parse.urlparse(self.nodes[0].url)
 
-        #Old authpair
+        # Old authpair
         authpair = url.username + ':' + url.password
 
-        #New authpair generated via share/rpcuser tool
+        # New authpair generated via share/rpcuser tool
         rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
         password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
 
-        #Second authpair with different username
+        # Second authpair with different username
         rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
         password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
-        authpairnew = "rt:"+password
+        authpairnew = "rt:" + password
 
         headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
 
@@ -58,63 +59,63 @@
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         resp = conn.getresponse()
-        assert_equal(resp.status==401, False)
+        assert_equal(resp.status == 401, False)
         conn.close()
-        
-        #Use new authpair to confirm both work
+
+        # Use new authpair to confirm both work
         headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         resp = conn.getresponse()
-        assert_equal(resp.status==401, False)
+        assert_equal(resp.status == 401, False)
         conn.close()
 
-        #Wrong login name with rt's password
-        authpairnew = "rtwrong:"+password
+        # Wrong login name with rt's password
+        authpairnew = "rtwrong:" + password
         headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         resp = conn.getresponse()
-        assert_equal(resp.status==401, True)
+        assert_equal(resp.status == 401, True)
         conn.close()
 
-        #Wrong password for rt
-        authpairnew = "rt:"+password+"wrong"
+        # Wrong password for rt
+        authpairnew = "rt:" + password + "wrong"
         headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         resp = conn.getresponse()
-        assert_equal(resp.status==401, True)
+        assert_equal(resp.status == 401, True)
         conn.close()
 
-        #Correct for rt2
-        authpairnew = "rt2:"+password2
+        # Correct for rt2
+        authpairnew = "rt2:" + password2
         headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         resp = conn.getresponse()
-        assert_equal(resp.status==401, False)
+        assert_equal(resp.status == 401, False)
         conn.close()
 
-        #Wrong password for rt2
-        authpairnew = "rt2:"+password2+"wrong"
+        # Wrong password for rt2
+        authpairnew = "rt2:" + password2 + "wrong"
         headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
         conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
         resp = conn.getresponse()
-        assert_equal(resp.status==401, True)
+        assert_equal(resp.status == 401, True)
         conn.close()
 
 
 if __name__ == '__main__':
-    HTTPBasicsTest ().main ()
+    HTTPBasicsTest().main()
diff --git a/qa/rpc-tests/nodehandling.py b/qa/rpc-tests/nodehandling.py
--- a/qa/rpc-tests/nodehandling.py
+++ b/qa/rpc-tests/nodehandling.py
@@ -12,6 +12,7 @@
 
 import urllib.parse
 
+
 class NodeHandlingTest (BitcoinTestFramework):
 
     def __init__(self):
@@ -20,23 +21,29 @@
         self.setup_clean_chain = False
 
     def run_test(self):
-        ###########################
+        #
         # setban/listbanned tests #
-        ###########################
-        assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
+        #
+        # we should have 4 nodes at this point
+        assert_equal(len(self.nodes[2].getpeerinfo()), 4)
         self.nodes[2].setban("127.0.0.1", "add")
-        time.sleep(3) #wait till the nodes are disconected
-        assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
+        time.sleep(3)  # wait till the nodes are disconnected
+        # all nodes must be disconnected at this point
+        assert_equal(len(self.nodes[2].getpeerinfo()), 0)
         assert_equal(len(self.nodes[2].listbanned()), 1)
         self.nodes[2].clearbanned()
         assert_equal(len(self.nodes[2].listbanned()), 0)
         self.nodes[2].setban("127.0.0.0/24", "add")
         assert_equal(len(self.nodes[2].listbanned()), 1)
         try:
-            self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
+            # throws exception because 127.0.0.1 is within
+            # range 127.0.0.0/24
+            self.nodes[2].setban("127.0.0.1", "add")
         except:
             pass
-        assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
+        # still only one banned ip because 127.0.0.1 is within the
+        # range of 127.0.0.0/24
+        assert_equal(len(self.nodes[2].listbanned()), 1)
         try:
             self.nodes[2].setban("127.0.0.1", "remove")
         except:
@@ -47,16 +54,18 @@
         self.nodes[2].clearbanned()
         assert_equal(len(self.nodes[2].listbanned()), 0)
 
-        ##test persisted banlist
+        # test persisted banlist
         self.nodes[2].setban("127.0.0.0/32", "add")
         self.nodes[2].setban("127.0.0.0/24", "add")
-        self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
-        self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
+        self.nodes[2].setban("192.168.0.1", "add", 1)  # ban for 1 second
+        self.nodes[2].setban(
+            "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
         listBeforeShutdown = self.nodes[2].listbanned()
-        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
-        time.sleep(2) #make 100% sure we expired 192.168.0.1 node time
+        assert_equal(
+            "192.168.0.1/32", listBeforeShutdown[2]['address'])  # must be here
+        time.sleep(2)  # make 100% sure we expired 192.168.0.1 node time
 
-        #stop node
+        # stop node
         stop_node(self.nodes[2], 2)
 
         self.nodes[2] = start_node(2, self.options.tmpdir)
@@ -65,21 +74,21 @@
         assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
         assert_equal("/19" in listAfterShutdown[2]['address'], True)
 
-        ###########################
+        #
         # RPC disconnectnode test #
-        ###########################
+        #
         url = urllib.parse.urlparse(self.nodes[1].url)
-        self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
-        time.sleep(2) #disconnecting a node needs a little bit of time
+        self.nodes[0].disconnectnode(url.hostname + ":" + str(p2p_port(1)))
+        time.sleep(2)  # disconnecting a node needs a little bit of time
         for node in self.nodes[0].getpeerinfo():
-            assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
+            assert(node['addr'] != url.hostname + ":" + str(p2p_port(1)))
 
-        connect_nodes_bi(self.nodes,0,1) #reconnect the node
+        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
         found = False
         for node in self.nodes[0].getpeerinfo():
-            if node['addr'] == url.hostname+":"+str(p2p_port(1)):
+            if node['addr'] == url.hostname + ":" + str(p2p_port(1)):
                 found = True
         assert(found)
 
 if __name__ == '__main__':
-    NodeHandlingTest ().main ()
+    NodeHandlingTest().main()
diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py
--- a/qa/rpc-tests/p2p-compactblocks.py
+++ b/qa/rpc-tests/p2p-compactblocks.py
@@ -18,7 +18,10 @@
 '''
 
 # TestNode: A peer we use to send messages to bitcoind, and store responses.
+
+
 class TestNode(SingleNodeConnCB):
+
     def __init__(self):
         SingleNodeConnCB.__init__(self)
         self.last_sendcmpct = []
@@ -46,7 +49,8 @@
         self.last_cmpctblock = message
         self.block_announced = True
         self.last_cmpctblock.header_and_shortids.header.calc_sha256()
-        self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256)
+        self.set_announced_blockhashes.add(
+            self.last_cmpctblock.header_and_shortids.header.sha256)
 
     def on_headers(self, conn, message):
         self.last_headers = message
@@ -110,7 +114,9 @@
             return (block_hash in self.set_announced_blockhashes)
         return wait_until(received_hash, timeout=timeout)
 
+
 class CompactBlocksTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
@@ -121,16 +127,17 @@
         self.nodes = []
 
         # Start up two version 1 CB nodes.
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, 
-                [["-debug", "-logtimemicros=1"], 
-                 ["-debug", "-logtimemicros", "-txindex"]])
+        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+                                 [["-debug", "-logtimemicros=1"],
+                                  ["-debug", "-logtimemicros", "-txindex"]])
         connect_nodes(self.nodes[0], 1)
 
     def build_block_on_tip(self, node):
         height = node.getblockcount()
         tip = node.getbestblockhash()
         mtp = node.getblockheader(tip)['mediantime']
-        block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
+        block = create_block(
+            int(tip, 16), create_coinbase(height + 1), mtp + 1)
         block.nVersion = 4
         block.solve()
         return block
@@ -178,7 +185,8 @@
         assert(got_message)
         with mininode_lock:
             # Check that the first version received is the preferred one
-            assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
+            assert_equal(
+                test_node.last_sendcmpct[0].version, preferred_version)
             # And that we receive versions down to 1.
             assert_equal(test_node.last_sendcmpct[-1].version, 1)
             test_node.last_sendcmpct = []
@@ -198,11 +206,13 @@
                         block_hash, peer.last_cmpctblock, peer.last_inv))
 
         # We shouldn't get any block announcements via cmpctblock yet.
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is None)
 
         # Try one more time, this time after requesting headers.
         test_node.request_headers_and_sync(locator=[tip])
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
 
         # Test a few ways of using sendcmpct that should NOT
         # result in compact block announcements.
@@ -211,10 +221,11 @@
 
         # Now try a SENDCMPCT message with too-high version
         sendcmpct = msg_sendcmpct()
-        sendcmpct.version = 999 # was: preferred_version+1
+        sendcmpct.version = 999  # was: preferred_version+1
         sendcmpct.announce = True
         test_node.send_and_ping(sendcmpct)
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is None)
 
         # Headers sync before next test.
         test_node.request_headers_and_sync(locator=[tip])
@@ -223,7 +234,8 @@
         sendcmpct.version = preferred_version
         sendcmpct.announce = False
         test_node.send_and_ping(sendcmpct)
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is None)
 
         # Headers sync before next test.
         test_node.request_headers_and_sync(locator=[tip])
@@ -232,38 +244,45 @@
         sendcmpct.version = preferred_version
         sendcmpct.announce = True
         test_node.send_and_ping(sendcmpct)
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is not None)
 
         # Try one more time (no headers sync should be needed!)
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is not None)
 
         # Try one more time, after turning on sendheaders
         test_node.send_and_ping(msg_sendheaders())
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is not None)
 
         # Try one more time, after sending a version-1, announce=false message.
-        sendcmpct.version = preferred_version-1
+        sendcmpct.version = preferred_version - 1
         sendcmpct.announce = False
         test_node.send_and_ping(sendcmpct)
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is not None)
 
         # Now turn off announcements
         sendcmpct.version = preferred_version
         sendcmpct.announce = False
         test_node.send_and_ping(sendcmpct)
-        check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
+        check_announcement_of_new_block(
+            node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
 
         if old_node is not None:
             # Verify that a peer using an older protocol version can receive
             # announcements from this node.
-            sendcmpct.version = 1 # preferred_version-1
+            sendcmpct.version = 1  # preferred_version-1
             sendcmpct.announce = True
             old_node.send_and_ping(sendcmpct)
             # Header sync
             old_node.request_headers_and_sync(locator=[tip])
-            check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None)
+            check_announcement_of_new_block(
+                node, old_node, lambda p: p.last_cmpctblock is not None)
 
-    # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
+    # This test actually causes bitcoind to (reasonably!) disconnect us, so do
+    # this last.
     def test_invalid_cmpctblock_message(self):
         self.nodes[0].generate(101)
         block = self.build_block_on_tip(self.nodes[0])
@@ -275,7 +294,8 @@
         prefilled_txn = PrefilledTransaction(1, block.vtx[0])
         cmpct_block.prefilled_txn = [prefilled_txn]
         self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
-        assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
+        assert(
+            int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
 
     # Compare the generated shortids to what we expect based on BIP 152, given
     # bitcoind's choice of nonce.
@@ -316,8 +336,10 @@
         with mininode_lock:
             assert(test_node.last_cmpctblock is not None)
             # Convert the on-the-wire representation to absolute indexes
-            header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
-        self.check_compactblock_construction_from_block(header_and_shortids, block_hash, block)
+            header_and_shortids = HeaderAndShortIDs(
+                test_node.last_cmpctblock.header_and_shortids)
+        self.check_compactblock_construction_from_block(
+            header_and_shortids, block_hash, block)
 
         # Now fetch the compact block using a normal non-announce getdata
         with mininode_lock:
@@ -333,8 +355,10 @@
         with mininode_lock:
             assert(test_node.last_cmpctblock is not None)
             # Convert the on-the-wire representation to absolute indexes
-            header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
-        self.check_compactblock_construction_from_block(header_and_shortids, block_hash, block)
+            header_and_shortids = HeaderAndShortIDs(
+                test_node.last_cmpctblock.header_and_shortids)
+        self.check_compactblock_construction_from_block(
+            header_and_shortids, block_hash, block)
 
     def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block):
         # Check that we got the right block!
@@ -358,7 +382,8 @@
             assert(entry.tx.wit.is_null())
 
         # Check that the cmpctblock message announced all the transactions.
-        assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
+        assert_equal(len(header_and_shortids.prefilled_txn)
+                     + len(header_and_shortids.shortids), len(block.vtx))
 
         # And now check that all the shortids are as expected as well.
         # Determine the siphash keys to use.
@@ -390,12 +415,14 @@
 
             if announce == "inv":
                 test_node.send_message(msg_inv([CInv(2, block.sha256)]))
-                success = wait_until(lambda: test_node.last_getheaders is not None, timeout=30)
+                success = wait_until(
+                    lambda: test_node.last_getheaders is not None, timeout=30)
                 assert(success)
                 test_node.send_header_for_blocks([block])
             else:
                 test_node.send_header_for_blocks([block])
-            success = wait_until(lambda: test_node.last_getdata is not None, timeout=30)
+            success = wait_until(
+                lambda: test_node.last_getdata is not None, timeout=30)
             assert(success)
             assert_equal(len(test_node.last_getdata.inv), 1)
             assert_equal(test_node.last_getdata.inv[0].type, 4)
@@ -410,13 +437,14 @@
             if version == 2:
                 coinbase_hash = block.vtx[0].calc_sha256(True)
             comp_block.shortids = [
-                    calculate_shortid(k0, k1, coinbase_hash) ]
+                calculate_shortid(k0, k1, coinbase_hash)]
             test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
             assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
             # Expect a getblocktxn message.
             with mininode_lock:
                 assert(test_node.last_getblocktxn is not None)
-                absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
+                absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute(
+                )
             assert_equal(absolute_indexes, [0])  # should be a coinbase request
 
             # Send the coinbase, and verify that the tip advances.
@@ -449,14 +477,15 @@
     # node needs, and that responding to them causes the block to be
     # reconstructed.
     def test_getblocktxn_requests(self, node, test_node, version):
-        with_witness = (version==2)
+        with_witness = (version == 2)
 
         def test_getblocktxn_response(compact_block, peer, expected_result):
             msg = msg_cmpctblock(compact_block.to_p2p())
             peer.send_and_ping(msg)
             with mininode_lock:
                 assert(peer.last_getblocktxn is not None)
-                absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute()
+                absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute(
+                )
             assert_equal(absolute_indexes, expected_result)
 
         def test_tip_after_message(node, peer, msg, tip):
@@ -468,7 +497,8 @@
         utxo = self.utxos.pop(0)
 
         block = self.build_block_with_transactions(node, utxo, 5)
-        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        self.utxos.append(
+            [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
         comp_block = HeaderAndShortIDs()
         comp_block.initialize_from_block(block, use_witness=with_witness)
 
@@ -476,40 +506,48 @@
 
         msg_bt = msg_blocktxn()
         if with_witness:
-            msg_bt = msg_witness_blocktxn() # serialize with witnesses
-        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
+            msg_bt = msg_witness_blocktxn()  # serialize with witnesses
+        msg_bt.block_transactions = BlockTransactions(
+            block.sha256, block.vtx[1:])
         test_tip_after_message(node, test_node, msg_bt, block.sha256)
 
         utxo = self.utxos.pop(0)
         block = self.build_block_with_transactions(node, utxo, 5)
-        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        self.utxos.append(
+            [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
 
         # Now try interspersing the prefilled transactions
-        comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
+        comp_block.initialize_from_block(
+            block, prefill_list=[0, 1, 5], use_witness=with_witness)
         test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
-        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
+        msg_bt.block_transactions = BlockTransactions(
+            block.sha256, block.vtx[2:5])
         test_tip_after_message(node, test_node, msg_bt, block.sha256)
 
         # Now try giving one transaction ahead of time.
         utxo = self.utxos.pop(0)
         block = self.build_block_with_transactions(node, utxo, 5)
-        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        self.utxos.append(
+            [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
         test_node.send_and_ping(msg_tx(block.vtx[1]))
         assert(block.vtx[1].hash in node.getrawmempool())
 
         # Prefill 4 out of the 6 transactions, and verify that only the one
         # that was not in the mempool is requested.
-        comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
+        comp_block.initialize_from_block(
+            block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
         test_getblocktxn_response(comp_block, test_node, [5])
 
-        msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
+        msg_bt.block_transactions = BlockTransactions(
+            block.sha256, [block.vtx[5]])
         test_tip_after_message(node, test_node, msg_bt, block.sha256)
 
         # Now provide all transactions to the node before the block is
         # announced and verify reconstruction happens immediately.
         utxo = self.utxos.pop(0)
         block = self.build_block_with_transactions(node, utxo, 10)
-        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        self.utxos.append(
+            [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
         for tx in block.vtx[1:]:
             test_node.send_message(msg_tx(tx))
         test_node.sync_with_ping()
@@ -523,8 +561,10 @@
             test_node.last_getblocktxn = None
 
         # Send compact block
-        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
-        test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
+        comp_block.initialize_from_block(
+            block, prefill_list=[0], use_witness=with_witness)
+        test_tip_after_message(
+            node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
         with mininode_lock:
             # Shouldn't have gotten a request for any transaction
             assert(test_node.last_getblocktxn is None)
@@ -537,7 +577,8 @@
         utxo = self.utxos.pop(0)
 
         block = self.build_block_with_transactions(node, utxo, 10)
-        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        self.utxos.append(
+            [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
         # Relay the first 5 transactions from the block in advance
         for tx in block.vtx[1:6]:
             test_node.send_message(msg_tx(tx))
@@ -549,12 +590,14 @@
 
         # Send compact block
         comp_block = HeaderAndShortIDs()
-        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
+        comp_block.initialize_from_block(
+            block, prefill_list=[0], use_witness=(version == 2))
         test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
         absolute_indexes = []
         with mininode_lock:
             assert(test_node.last_getblocktxn is not None)
-            absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
+            absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute(
+            )
         assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
 
         # Now give an incorrect response.
@@ -566,23 +609,26 @@
         # verifying that the block isn't marked bad permanently. This is good
         # enough for now.
         msg = msg_blocktxn()
-        if version==2:
+        if version == 2:
             msg = msg_witness_blocktxn()
-        msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
+        msg.block_transactions = BlockTransactions(
+            block.sha256, [block.vtx[5]] + block.vtx[7:])
         test_node.send_and_ping(msg)
 
         # Tip should not have updated
         assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
 
         # We should receive a getdata request
-        success = wait_until(lambda: test_node.last_getdata is not None, timeout=10)
+        success = wait_until(
+            lambda: test_node.last_getdata is not None, timeout=10)
         assert(success)
         assert_equal(len(test_node.last_getdata.inv), 1)
-        assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2|MSG_WITNESS_FLAG)
+        assert(test_node.last_getdata.inv[0].type == 2 or
+               test_node.last_getdata.inv[0].type == 2 | MSG_WITNESS_FLAG)
         assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
 
         # Deliver the block
-        if version==2:
+        if version == 2:
             test_node.send_and_ping(msg_witness_block(block))
         else:
             test_node.send_and_ping(msg_block(block))
@@ -599,19 +645,24 @@
             block = FromHex(CBlock(), node.getblock(block_hash, False))
 
             msg = msg_getblocktxn()
-            msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
+            msg.block_txn_request = BlockTransactionsRequest(
+                int(block_hash, 16), [])
             num_to_request = random.randint(1, len(block.vtx))
-            msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
+            msg.block_txn_request.from_absolute(
+                sorted(random.sample(range(len(block.vtx)), num_to_request)))
             test_node.send_message(msg)
-            success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10)
+            success = wait_until(
+                lambda: test_node.last_blocktxn is not None, timeout=10)
             assert(success)
 
             [tx.calc_sha256() for tx in block.vtx]
             with mininode_lock:
-                assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
+                assert_equal(
+                    test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
                 all_indices = msg.block_txn_request.to_absolute()
                 for index in all_indices:
-                    tx = test_node.last_blocktxn.block_transactions.transactions.pop(0)
+                    tx = test_node.last_blocktxn.block_transactions.transactions.pop(
+                        0)
                     tx.calc_sha256()
                     assert_equal(tx.sha256, block.vtx[index].sha256)
                     if version == 1:
@@ -619,21 +670,24 @@
                         assert(tx.wit.is_null())
                     else:
                         # Check that the witness matches
-                        assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
+                        assert_equal(
+                            tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
                 test_node.last_blocktxn = None
             current_height -= 1
 
         # Next request should send a full block response, as we're past the
         # allowed depth for a blocktxn response.
         block_hash = node.getblockhash(current_height)
-        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
+        msg.block_txn_request = BlockTransactionsRequest(
+            int(block_hash, 16), [0])
         with mininode_lock:
             test_node.last_block = None
             test_node.last_blocktxn = None
         test_node.send_and_ping(msg)
         with mininode_lock:
             test_node.last_block.block.calc_sha256()
-            assert_equal(test_node.last_block.block.sha256, int(block_hash, 16))
+            assert_equal(
+                test_node.last_block.block.sha256, int(block_hash, 16))
             assert_equal(test_node.last_blocktxn, None)
 
     def test_compactblocks_not_at_tip(self, node, test_node):
@@ -647,7 +701,8 @@
 
         test_node.clear_block_announcement()
         test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
-        success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30)
+        success = wait_until(
+            lambda: test_node.last_cmpctblock is not None, timeout=30)
         assert(success)
 
         test_node.clear_block_announcement()
@@ -657,15 +712,17 @@
         with mininode_lock:
             test_node.last_block = None
         test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
-        success = wait_until(lambda: test_node.last_block is not None, timeout=30)
+        success = wait_until(
+            lambda: test_node.last_block is not None, timeout=30)
         assert(success)
         with mininode_lock:
             test_node.last_block.block.calc_sha256()
-            assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16))
+            assert_equal(
+                test_node.last_block.block.sha256, int(new_blocks[0], 16))
 
         # Generate an old compactblock, and verify that it's not accepted.
         cur_height = node.getblockcount()
-        hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
+        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
         block = self.build_block_on_tip(node)
         block.hashPrevBlock = hashPrevBlock
         block.solve()
@@ -708,7 +765,8 @@
             for l in listeners:
                 assert(l.last_cmpctblock is not None)
                 l.last_cmpctblock.header_and_shortids.header.calc_sha256()
-                assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
+                assert_equal(
+                    l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
 
     # Test that we don't get disconnected if we relay a compact block with valid header,
     # but invalid transactions.
@@ -724,7 +782,8 @@
         # Now send the compact block with all transactions prefilled, and
         # verify that we don't get disconnected.
         comp_block = HeaderAndShortIDs()
-        comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=False)
+        comp_block.initialize_from_block(
+            block, prefill_list=[0, 1, 2, 3, 4], use_witness=False)
         msg = msg_cmpctblock(comp_block.to_p2p())
         test_node.send_and_ping(msg)
 
@@ -770,7 +829,8 @@
         delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
         assert_equal(int(node.getbestblockhash(), 16), block.sha256)
 
-        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        self.utxos.append(
+            [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
 
         # Now test that delivering an invalid compact block won't break relay
 
@@ -779,8 +839,9 @@
             delivery_peer.send_message(msg_tx(tx))
         delivery_peer.sync_with_ping()
 
-        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
-        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
+        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
+        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[
+            0].scriptWitness.stack = [ser_uint256(0)]
 
         cmpct_block.use_witness = True
         delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
@@ -799,11 +860,12 @@
         self.old_node = TestNode()  # version 1 peer
 
         connections = []
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
         connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
-                    self.ex_softfork_node, services=NODE_NETWORK))
+                                    self.ex_softfork_node, services=NODE_NETWORK))
         connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
-                    self.old_node, services=NODE_NETWORK))
+                                    self.old_node, services=NODE_NETWORK))
         self.test_node.add_connection(connections[0])
         self.ex_softfork_node.add_connection(connections[1])
         self.old_node.add_connection(connections[2])
@@ -821,19 +883,22 @@
         print("\tTesting SENDCMPCT p2p message... ")
         self.test_sendcmpct(self.nodes[0], self.test_node, 1)
         sync_blocks(self.nodes)
-        self.test_sendcmpct(self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node)
+        self.test_sendcmpct(
+            self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node)
         sync_blocks(self.nodes)
 
         print("\tTesting compactblock construction...")
         self.test_compactblock_construction(self.nodes[0], self.test_node)
         sync_blocks(self.nodes)
-        self.test_compactblock_construction(self.nodes[1], self.ex_softfork_node)
+        self.test_compactblock_construction(
+            self.nodes[1], self.ex_softfork_node)
         sync_blocks(self.nodes)
 
         print("\tTesting compactblock requests... ")
         self.test_compactblock_requests(self.nodes[0], self.test_node, 1)
         sync_blocks(self.nodes)
-        self.test_compactblock_requests(self.nodes[1], self.ex_softfork_node, 2)
+        self.test_compactblock_requests(
+            self.nodes[1], self.ex_softfork_node, 2)
         sync_blocks(self.nodes)
 
         print("\tTesting getblocktxn requests...")
@@ -849,34 +914,42 @@
         self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
         sync_blocks(self.nodes)
 
-        print("\tTesting compactblock requests/announcements not at chain tip...")
+        print(
+            "\tTesting compactblock requests/announcements not at chain tip...")
         self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
         sync_blocks(self.nodes)
-        self.test_compactblocks_not_at_tip(self.nodes[1], self.ex_softfork_node)
+        self.test_compactblocks_not_at_tip(
+            self.nodes[1], self.ex_softfork_node)
         self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
         sync_blocks(self.nodes)
 
         print("\tTesting handling of incorrect blocktxn responses...")
         self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
         sync_blocks(self.nodes)
-        self.test_incorrect_blocktxn_response(self.nodes[1], self.ex_softfork_node, 2)
+        self.test_incorrect_blocktxn_response(
+            self.nodes[1], self.ex_softfork_node, 2)
         sync_blocks(self.nodes)
 
         # End-to-end block relay tests
         print("\tTesting end-to-end block relay...")
         self.request_cb_announcements(self.test_node, self.nodes[0])
         self.request_cb_announcements(self.old_node, self.nodes[1])
-        self.request_cb_announcements(self.ex_softfork_node, self.nodes[1], version=2)
-        self.test_end_to_end_block_relay(self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node])
-        self.test_end_to_end_block_relay(self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node])
+        self.request_cb_announcements(
+            self.ex_softfork_node, self.nodes[1], version=2)
+        self.test_end_to_end_block_relay(
+            self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node])
+        self.test_end_to_end_block_relay(
+            self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node])
 
         print("\tTesting handling of invalid compact blocks...")
         self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node)
-        self.test_invalid_tx_in_compactblock(self.nodes[1], self.ex_softfork_node)
+        self.test_invalid_tx_in_compactblock(
+            self.nodes[1], self.ex_softfork_node)
         self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node)
 
         print("\tTesting reconstructing compact blocks from all peers...")
-        self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.ex_softfork_node, self.old_node)
+        self.test_compactblock_reconstruction_multiple_peers(
+            self.nodes[1], self.ex_softfork_node, self.old_node)
         sync_blocks(self.nodes)
 
         print("\tTesting invalid index in cmpctblock message...")
diff --git a/qa/rpc-tests/p2p-leaktests.py b/qa/rpc-tests/p2p-leaktests.py
--- a/qa/rpc-tests/p2p-leaktests.py
+++ b/qa/rpc-tests/p2p-leaktests.py
@@ -19,7 +19,9 @@
 
 banscore = 10
 
+
 class CLazyNode(NodeConnCB):
+
     def __init__(self):
         self.connection = None
         self.unexpected_msg = False
@@ -40,36 +42,61 @@
         self.connected = True
 
     def on_version(self, conn, message): self.bad_message(message)
+
     def on_verack(self, conn, message): self.bad_message(message)
+
     def on_reject(self, conn, message): self.bad_message(message)
+
     def on_inv(self, conn, message): self.bad_message(message)
+
     def on_addr(self, conn, message): self.bad_message(message)
+
     def on_alert(self, conn, message): self.bad_message(message)
+
     def on_getdata(self, conn, message): self.bad_message(message)
+
     def on_getblocks(self, conn, message): self.bad_message(message)
+
     def on_tx(self, conn, message): self.bad_message(message)
+
     def on_block(self, conn, message): self.bad_message(message)
+
     def on_getaddr(self, conn, message): self.bad_message(message)
+
     def on_headers(self, conn, message): self.bad_message(message)
+
     def on_getheaders(self, conn, message): self.bad_message(message)
+
     def on_ping(self, conn, message): self.bad_message(message)
+
     def on_mempool(self, conn): self.bad_message(message)
+
     def on_pong(self, conn, message): self.bad_message(message)
+
     def on_feefilter(self, conn, message): self.bad_message(message)
+
     def on_sendheaders(self, conn, message): self.bad_message(message)
+
     def on_sendcmpct(self, conn, message): self.bad_message(message)
+
     def on_cmpctblock(self, conn, message): self.bad_message(message)
+
     def on_getblocktxn(self, conn, message): self.bad_message(message)
+
     def on_blocktxn(self, conn, message): self.bad_message(message)
 
 # Node that never sends a version. We'll use this to send a bunch of messages
 # anyway, and eventually get disconnected.
+
+
 class CNodeNoVersionBan(CLazyNode):
+
     def __init__(self):
         super().__init__()
 
     # send a bunch of veracks without sending a message. This should get us disconnected.
-    # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
+    # NOTE: implementation-specific check here. Remove if bitcoind ban
+    # behavior changes
     def on_open(self, conn):
         super().on_open(conn)
         for i in range(banscore):
@@ -79,34 +106,46 @@
 
 # Node that never sends a version. This one just sits idle and hopes to receive
 # any message (it shouldn't!)
+
+
 class CNodeNoVersionIdle(CLazyNode):
+
     def __init__(self):
         super().__init__()
 
 # Node that sends a version but not a verack.
+
+
 class CNodeNoVerackIdle(CLazyNode):
+
     def __init__(self):
         self.version_received = False
         super().__init__()
 
     def on_reject(self, conn, message): pass
+
     def on_verack(self, conn, message): pass
     # When version is received, don't reply with a verack. Instead, see if the
     # node will give us a message that it shouldn't. This is not an exhaustive
     # list!
+
     def on_version(self, conn, message):
         self.version_received = True
         conn.send_message(msg_ping())
         conn.send_message(msg_getaddr())
 
+
 class P2PLeakTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 1
+
     def setup_network(self):
-        extra_args = [['-debug', '-banscore='+str(banscore)]
+        extra_args = [['-debug', '-banscore=' + str(banscore)]
                       for i in range(self.num_nodes)]
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, extra_args)
 
     def run_test(self):
         no_version_bannode = CNodeNoVersionBan()
@@ -114,24 +153,28 @@
         no_verack_idlenode = CNodeNoVerackIdle()
 
         connections = []
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
         no_version_bannode.add_connection(connections[0])
         no_version_idlenode.add_connection(connections[1])
         no_verack_idlenode.add_connection(connections[2])
 
         NetworkThread().start()  # Start up network handling in another thread
 
-        assert(wait_until(lambda: no_version_bannode.connected and no_version_idlenode.connected and no_verack_idlenode.version_received, timeout=10))
+        assert(
+            wait_until(lambda: no_version_bannode.connected and no_version_idlenode.connected and no_verack_idlenode.version_received, timeout=10))
 
         # Mine a block and make sure that it's not sent to the connected nodes
         self.nodes[0].generate(1)
 
-        #Give the node enough time to possibly leak out a message
+        # Give the node enough time to possibly leak out a message
         time.sleep(5)
 
-        #This node should have been banned
+        # This node should have been banned
         assert(no_version_bannode.connection.state == "closed")
 
         [conn.disconnect_node() for conn in connections]
diff --git a/qa/rpc-tests/p2p-mempool.py b/qa/rpc-tests/p2p-mempool.py
--- a/qa/rpc-tests/p2p-mempool.py
+++ b/qa/rpc-tests/p2p-mempool.py
@@ -7,7 +7,9 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class TestNode(NodeConnCB):
+
     def __init__(self):
         NodeConnCB.__init__(self)
         self.connection = None
@@ -70,6 +72,7 @@
         self.lastInv = []
         self.send_message(msg_mempool())
 
+
 class P2PMempoolTests(BitcoinTestFramework):
 
     def __init__(self):
@@ -80,22 +83,23 @@
     def setup_network(self):
         # Start a node with maxuploadtarget of 200 MB (/24h)
         self.nodes = []
-        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-peerbloomfilters=0"]))
+        self.nodes.append(
+            start_node(0, self.options.tmpdir, ["-debug", "-peerbloomfilters=0"]))
 
     def run_test(self):
-        #connect a mininode
+        # connect a mininode
         aTestNode = TestNode()
         node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
         aTestNode.add_connection(node)
         NetworkThread().start()
         aTestNode.wait_for_verack()
 
-        #request mempool
+        # request mempool
         aTestNode.send_mempool()
         aTestNode.wait_for_disconnect()
 
-        #mininode must be disconnected at this point
+        # mininode must be disconnected at this point
         assert_equal(len(self.nodes[0].getpeerinfo()), 0)
-    
+
 if __name__ == '__main__':
     P2PMempoolTests().main()
diff --git a/qa/rpc-tests/p2p-timeouts.py b/qa/rpc-tests/p2p-timeouts.py
--- a/qa/rpc-tests/p2p-timeouts.py
+++ b/qa/rpc-tests/p2p-timeouts.py
@@ -27,7 +27,9 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 
+
 class TestNode(SingleNodeConnCB):
+
     def __init__(self):
         SingleNodeConnCB.__init__(self)
         self.connected = False
@@ -43,7 +45,9 @@
         # Don't send a verack in response
         self.received_version = True
 
+
 class TimeoutsTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
@@ -53,19 +57,22 @@
         self.nodes = []
 
         # Start up node0 to be a version 1, pre-segwit node.
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, 
-                [["-debug", "-logtimemicros=1"]])
+        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+                                 [["-debug", "-logtimemicros=1"]])
 
     def run_test(self):
         # Setup the p2p connections and start up the network thread.
-        self.no_verack_node = TestNode() # never send verack
-        self.no_version_node = TestNode() # never send version (just ping)
-        self.no_send_node = TestNode() # never send anything
+        self.no_verack_node = TestNode()  # never send verack
+        self.no_version_node = TestNode()  # never send version (just ping)
+        self.no_send_node = TestNode()  # never send anything
 
         connections = []
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node))
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False))
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False))
         self.no_verack_node.add_connection(connections[0])
         self.no_version_node.add_connection(connections[1])
         self.no_send_node.add_connection(connections[2])
diff --git a/qa/rpc-tests/p2p-versionbits-warning.py b/qa/rpc-tests/p2p-versionbits-warning.py
--- a/qa/rpc-tests/p2p-versionbits-warning.py
+++ b/qa/rpc-tests/p2p-versionbits-warning.py
@@ -17,18 +17,22 @@
 soft-forks, and test that warning alerts are generated.
 '''
 
-VB_PERIOD = 144 # versionbits period length for regtest
-VB_THRESHOLD = 108 # versionbits activation threshold for regtest
+VB_PERIOD = 144  # versionbits period length for regtest
+VB_THRESHOLD = 108  # versionbits activation threshold for regtest
 VB_TOP_BITS = 0x20000000
-VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
+VB_UNKNOWN_BIT = 27  # Choose a bit unassigned to any deployment
 
 WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect"
-WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
+WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(
+    VB_UNKNOWN_BIT)
 VB_PATTERN = re.compile("^Warning.*versionbit")
 
 # TestNode: bare-bones "peer".  Used mostly as a conduit for a test to sending
 # p2p messages to a node, generating the messages in the main testing logic.
+
+
 class TestNode(NodeConnCB):
+
     def __init__(self):
         NodeConnCB.__init__(self)
         self.connection = None
@@ -64,6 +68,7 @@
 
 
 class VersionBitsWarningTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
@@ -74,18 +79,20 @@
         # Open and close to create zero-length file
         with open(self.alert_filename, 'w', encoding='utf8') as _:
             pass
-        self.extra_args = [["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+        self.extra_args = [
+            ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, self.extra_args)
 
     # Send numblocks blocks via peer with nVersionToUse set.
     def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
         tip = self.nodes[0].getbestblockhash()
         height = self.nodes[0].getblockcount()
-        block_time = self.nodes[0].getblockheader(tip)["time"]+1
+        block_time = self.nodes[0].getblockheader(tip)["time"] + 1
         tip = int(tip, 16)
 
         for _ in range(numblocks):
-            block = create_block(tip, create_coinbase(height+1), block_time)
+            block = create_block(tip, create_coinbase(height + 1), block_time)
             block.nVersion = nVersionToUse
             block.solve()
             peer.send_message(msg_block(block))
@@ -104,10 +111,11 @@
         test_node = TestNode()
 
         connections = []
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
         test_node.add_connection(connections[0])
 
-        NetworkThread().start() # Start up network handling in another thread
+        NetworkThread().start()  # Start up network handling in another thread
 
         # Test logic begins here
         test_node.wait_for_verack()
@@ -117,8 +125,8 @@
 
         # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
         # blocks signaling some unknown bit.
-        nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
-        self.send_blocks_with_version(test_node, VB_THRESHOLD-1, nVersion)
+        nVersion = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
+        self.send_blocks_with_version(test_node, VB_THRESHOLD - 1, nVersion)
 
         # Fill rest of period with regular version blocks
         self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
@@ -126,7 +134,8 @@
         # get*info()
         assert(not VB_PATTERN.match(self.nodes[0].getinfo()["errors"]))
         assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["errors"]))
-        assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
+        assert(not VB_PATTERN.match(
+            self.nodes[0].getnetworkinfo()["warnings"]))
 
         # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
         # some unknown bit
@@ -137,8 +146,10 @@
         # being of unexpected version.
         # Check that get*info() shows some kind of error.
         assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getinfo()["errors"])
-        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["errors"])
-        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[
+               0].getmininginfo()["errors"])
+        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[
+               0].getnetworkinfo()["warnings"])
 
         # Mine a period worth of expected blocks so the generic block-version warning
         # is cleared, and restart the node. This should move the versionbit state
@@ -148,18 +159,22 @@
         # Empty out the alert file
         with open(self.alert_filename, 'w', encoding='utf8') as _:
             pass
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, self.extra_args)
 
         # Connecting one block should be enough to generate an error.
         self.nodes[0].generate(1)
         assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getinfo()["errors"])
-        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["errors"])
-        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[
+               0].getmininginfo()["errors"])
+        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[
+               0].getnetworkinfo()["warnings"])
         stop_nodes(self.nodes)
         self.test_versionbits_in_alert_file()
 
         # Test framework expects the node to still be running...
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, self.extra_args)
 
 if __name__ == '__main__':
     VersionBitsWarningTest().main()
diff --git a/qa/rpc-tests/preciousblock.py b/qa/rpc-tests/preciousblock.py
--- a/qa/rpc-tests/preciousblock.py
+++ b/qa/rpc-tests/preciousblock.py
@@ -15,6 +15,7 @@
     sync_blocks,
 )
 
+
 def unidirectional_node_sync_via_rpc(node_src, node_dest):
     blocks_to_copy = []
     blockhash = node_src.getbestblockhash()
@@ -24,12 +25,14 @@
             break
         except:
             blocks_to_copy.append(blockhash)
-            blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
+            blockhash = node_src.getblockheader(
+                blockhash, True)['previousblockhash']
     blocks_to_copy.reverse()
     for blockhash in blocks_to_copy:
         blockdata = node_src.getblock(blockhash, False)
         assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
 
+
 def node_sync_via_rpc(nodes):
     for node_src in nodes:
         for node_dest in nodes:
@@ -37,7 +40,9 @@
                 continue
             unidirectional_node_sync_via_rpc(node_src, node_dest)
 
+
 class PreciousTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
@@ -64,9 +69,10 @@
         assert_equal(self.nodes[1].getblockcount(), 5)
         assert(hashC != hashG)
         print("Connect nodes and check no reorg occurs")
-        # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
+        # Submit competing blocks via RPC so any reorg should occur before we
+        # proceed (no way to wait on inaction for p2p sync)
         node_sync_via_rpc(self.nodes[0:2])
-        connect_nodes_bi(self.nodes,0,1)
+        connect_nodes_bi(self.nodes, 0, 1)
         assert_equal(self.nodes[0].getbestblockhash(), hashC)
         assert_equal(self.nodes[1].getbestblockhash(), hashG)
         print("Make Node0 prefer block G")
@@ -77,7 +83,8 @@
         assert_equal(self.nodes[0].getbestblockhash(), hashC)
         print("Make Node1 prefer block C")
         self.nodes[1].preciousblock(hashC)
-        sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
+        # wait because node 1 may not have downloaded hashC
+        sync_chain(self.nodes[0:2])
         assert_equal(self.nodes[1].getbestblockhash(), hashC)
         print("Make Node1 prefer block G again")
         self.nodes[1].preciousblock(hashG)
@@ -103,8 +110,8 @@
         hashL = self.nodes[2].getbestblockhash()
         print("Connect nodes and check no reorg occurs")
         node_sync_via_rpc(self.nodes[1:3])
-        connect_nodes_bi(self.nodes,1,2)
-        connect_nodes_bi(self.nodes,0,2)
+        connect_nodes_bi(self.nodes, 1, 2)
+        connect_nodes_bi(self.nodes, 0, 2)
         assert_equal(self.nodes[0].getbestblockhash(), hashH)
         assert_equal(self.nodes[1].getbestblockhash(), hashH)
         assert_equal(self.nodes[2].getbestblockhash(), hashL)
diff --git a/qa/rpc-tests/proxy_test.py b/qa/rpc-tests/proxy_test.py
--- a/qa/rpc-tests/proxy_test.py
+++ b/qa/rpc-tests/proxy_test.py
@@ -43,6 +43,7 @@
 
 
 class ProxyTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.num_nodes = 4
@@ -57,14 +58,16 @@
         self.conf1.auth = False
         # ... one supporting authenticated and unauthenticated (Tor)
         self.conf2 = Socks5Configuration()
-        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
+        self.conf2.addr = (
+            '127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
         self.conf2.unauth = True
         self.conf2.auth = True
         if self.have_ipv6:
             # ... one on IPv6 with similar configuration
             self.conf3 = Socks5Configuration()
             self.conf3.af = socket.AF_INET6
-            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
+            self.conf3.addr = (
+                '::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
             self.conf3.unauth = True
             self.conf3.auth = True
         else:
@@ -80,15 +83,20 @@
 
     def setup_nodes(self):
         # Note: proxies are not used to connect to local nodes
-        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
+        # this is because the proxy to use is based on CService.GetNetwork(),
+        # which return NET_UNROUTABLE for localhost
         args = [
-            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], 
-            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], 
-            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], 
+            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' %
+                (self.conf1.addr), '-proxyrandomize=1'],
+            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' %
+                (self.conf1.addr), '-onion=%s:%i' % (self.conf2.addr), '-proxyrandomize=0'],
+            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' %
+                (self.conf2.addr), '-proxyrandomize=1'],
             []
-            ]
+        ]
         if self.have_ipv6:
-            args[3] = ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
+            args[3] = ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' %
+                       (self.conf3.addr), '-proxyrandomize=0', '-noonion']
         return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)
 
     def node_test(self, node, proxies, auth, test_onion=True):
@@ -97,7 +105,8 @@
         node.addnode("15.61.23.23:1234", "onetry")
         cmd = proxies[0].queue.get()
         assert(isinstance(cmd, Socks5Command))
-        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME,
+        # even if connecting directly to IPv4/IPv6
         assert_equal(cmd.atyp, AddressType.DOMAINNAME)
         assert_equal(cmd.addr, b"15.61.23.23")
         assert_equal(cmd.port, 1234)
@@ -108,10 +117,12 @@
 
         if self.have_ipv6:
             # Test: outgoing IPv6 connection through node
-            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
+            node.addnode(
+                "[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
             cmd = proxies[1].queue.get()
             assert(isinstance(cmd, Socks5Command))
-            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+            # Note: bitcoind's SOCKS5 implementation only sends atyp
+            # DOMAINNAME, even if connecting directly to IPv4/IPv6
             assert_equal(cmd.atyp, AddressType.DOMAINNAME)
             assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
             assert_equal(cmd.port, 5443)
@@ -149,20 +160,25 @@
 
     def run_test(self):
         # basic -proxy
-        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
+        self.node_test(
+            self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
 
         # -proxy plus -onion
-        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
+        self.node_test(
+            self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
 
         # -proxy plus -onion, -proxyrandomize
-        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
-        # Check that credentials as used for -proxyrandomize connections are unique
-        credentials = set((x.username,x.password) for x in rv)
+        rv = self.node_test(
+            self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
+        # Check that credentials as used for -proxyrandomize connections are
+        # unique
+        credentials = set((x.username, x.password) for x in rv)
         assert_equal(len(credentials), len(rv))
 
         if self.have_ipv6:
             # proxy on IPv6 localhost
-            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
+            self.node_test(
+                self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
 
         def networks_dict(d):
             r = {}
@@ -172,32 +188,31 @@
 
         # test RPC getnetworkinfo
         n0 = networks_dict(self.nodes[0].getnetworkinfo())
-        for net in ['ipv4','ipv6','onion']:
+        for net in ['ipv4', 'ipv6', 'onion']:
             assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
             assert_equal(n0[net]['proxy_randomize_credentials'], True)
         assert_equal(n0['onion']['reachable'], True)
 
         n1 = networks_dict(self.nodes[1].getnetworkinfo())
-        for net in ['ipv4','ipv6']:
+        for net in ['ipv4', 'ipv6']:
             assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
             assert_equal(n1[net]['proxy_randomize_credentials'], False)
         assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
         assert_equal(n1['onion']['proxy_randomize_credentials'], False)
         assert_equal(n1['onion']['reachable'], True)
-        
+
         n2 = networks_dict(self.nodes[2].getnetworkinfo())
-        for net in ['ipv4','ipv6','onion']:
+        for net in ['ipv4', 'ipv6', 'onion']:
             assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
             assert_equal(n2[net]['proxy_randomize_credentials'], True)
         assert_equal(n2['onion']['reachable'], True)
 
         if self.have_ipv6:
             n3 = networks_dict(self.nodes[3].getnetworkinfo())
-            for net in ['ipv4','ipv6']:
+            for net in ['ipv4', 'ipv6']:
                 assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                 assert_equal(n3[net]['proxy_randomize_credentials'], False)
             assert_equal(n3['onion']['reachable'], False)
 
 if __name__ == '__main__':
     ProxyTest().main()
-
diff --git a/qa/rpc-tests/reindex.py b/qa/rpc-tests/reindex.py
--- a/qa/rpc-tests/reindex.py
+++ b/qa/rpc-tests/reindex.py
@@ -14,6 +14,7 @@
 )
 import time
 
+
 class ReindexTest(BitcoinTestFramework):
 
     def __init__(self):
@@ -28,8 +29,10 @@
         self.nodes[0].generate(3)
         blockcount = self.nodes[0].getblockcount()
         stop_nodes(self.nodes)
-        extra_args = [["-debug", "-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+        extra_args = [
+            ["-debug", "-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, extra_args)
         while self.nodes[0].getblockcount() < blockcount:
             time.sleep(0.1)
         assert_equal(self.nodes[0].getblockcount(), blockcount)
diff --git a/qa/rpc-tests/rest.py b/qa/rpc-tests/rest.py
--- a/qa/rpc-tests/rest.py
+++ b/qa/rpc-tests/rest.py
@@ -17,6 +17,7 @@
 import http.client
 import urllib.parse
 
+
 def deser_uint256(f):
     r = 0
     for i in range(8):
@@ -24,8 +25,10 @@
         r += t << (i * 32)
     return r
 
-#allows simple http get calls
-def http_get_call(host, port, path, response_object = 0):
+# allows simple http get calls
+
+
+def http_get_call(host, port, path, response_object=0):
     conn = http.client.HTTPConnection(host, port)
     conn.request('GET', path)
 
@@ -34,8 +37,10 @@
 
     return conn.getresponse().read().decode('utf-8')
 
-#allows simple http post calls with a request body
-def http_post_call(host, port, path, requestdata = '', response_object = 0):
+# allows simple http post calls with a request body
+
+
+def http_post_call(host, port, path, requestdata='', response_object=0):
     conn = http.client.HTTPConnection(host, port)
     conn.request('POST', path, requestdata)
 
@@ -44,6 +49,7 @@
 
     return conn.getresponse().read()
 
+
 class RESTTest (BitcoinTestFramework):
     FORMAT_SEPARATOR = "."
 
@@ -54,10 +60,10 @@
 
     def setup_network(self, split=False):
         self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
-        connect_nodes_bi(self.nodes,0,1)
-        connect_nodes_bi(self.nodes,1,2)
-        connect_nodes_bi(self.nodes,0,2)
-        self.is_network_split=False
+        connect_nodes_bi(self.nodes, 0, 1)
+        connect_nodes_bi(self.nodes, 1, 2)
+        connect_nodes_bi(self.nodes, 0, 2)
+        self.is_network_split = False
         self.sync_all()
 
     def run_test(self):
@@ -77,61 +83,66 @@
         self.sync_all()
         bb_hash = self.nodes[0].getbestblockhash()
 
-        assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
+        assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
+                     # balance now should be 0.1 on node 1
 
         # load the latest 0.1 tx over the REST API
-        json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/tx/' + txid + self.FORMAT_SEPARATOR + "json")
         json_obj = json.loads(json_string)
-        vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
+        vintx = json_obj['vin'][0]['txid']
+            # get the vin to later check for utxo (should be spent by then)
         # get n of 0.1 outpoint
         n = 0
         for vout in json_obj['vout']:
             if vout['value'] == 0.1:
                 n = vout['n']
 
-
-        ######################################
+        #
         # GETUTXOS: query a unspent outpoint #
-        ######################################
-        json_request = '/checkmempool/'+txid+'-'+str(n)
-        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+        #
+        json_request = '/checkmempool/' + txid + '-' + str(n)
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
 
-        #check chainTip response
+        # check chainTip response
         assert_equal(json_obj['chaintipHash'], bb_hash)
 
-        #make sure there is one utxo
+        # make sure there is one utxo
         assert_equal(len(json_obj['utxos']), 1)
         assert_equal(json_obj['utxos'][0]['value'], 0.1)
 
-
-        ################################################
+        #
         # GETUTXOS: now query a already spent outpoint #
-        ################################################
-        json_request = '/checkmempool/'+vintx+'-0'
-        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+        #
+        json_request = '/checkmempool/' + vintx + '-0'
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
 
-        #check chainTip response
+        # check chainTip response
         assert_equal(json_obj['chaintipHash'], bb_hash)
 
-        #make sure there is no utox in the response because this oupoint has been spent
+        # make sure there is no utxo in the response because this outpoint has
+        # been spent
         assert_equal(len(json_obj['utxos']), 0)
 
-        #check bitmap
+        # check bitmap
         assert_equal(json_obj['bitmap'], "0")
 
-
-        ##################################################
+        #
         # GETUTXOS: now check both with the same request #
-        ##################################################
-        json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
-        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+        #
+        json_request = '/checkmempool/' + \
+            txid + '-' + str(n) + '/' + vintx + '-0'
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
         assert_equal(len(json_obj['utxos']), 1)
         assert_equal(json_obj['bitmap'], "10")
 
-        #test binary response
+        # test binary response
         bb_hash = self.nodes[0].getbestblockhash()
 
         binaryRequest = b'\x01\x02'
@@ -140,169 +151,216 @@
         binaryRequest += hex_str_to_bytes(vintx)
         binaryRequest += pack("i", 0)
 
-        bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
+        bin_response = http_post_call(
+            url.hostname, url.port, '/rest/getutxos' + self.FORMAT_SEPARATOR + 'bin', binaryRequest)
         output = BytesIO()
         output.write(bin_response)
         output.seek(0)
         chainHeight = unpack("i", output.read(4))[0]
         hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
 
-        assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
-        assert_equal(chainHeight, 102) #chain height must be 102
-
+        assert_equal(bb_hash, hashFromBinResponse)
+                     # check if getutxo's chaintip during calculation was fine
+        assert_equal(chainHeight, 102)  # chain height must be 102
 
-        ############################
+        #
         # GETUTXOS: mempool checks #
-        ############################
+        #
 
         # do a tx and don't sync
         txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
-        json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/tx/' + txid + self.FORMAT_SEPARATOR + "json")
         json_obj = json.loads(json_string)
-        vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
+        vintx = json_obj['vin'][0]['txid']
+            # get the vin to later check for utxo (should be spent by then)
         # get n of 0.1 outpoint
         n = 0
         for vout in json_obj['vout']:
             if vout['value'] == 0.1:
                 n = vout['n']
 
-        json_request = '/'+txid+'-'+str(n)
-        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+        json_request = '/' + txid + '-' + str(n)
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
-        assert_equal(len(json_obj['utxos']), 0) #there should be a outpoint because it has just added to the mempool
+        assert_equal(len(json_obj['utxos']), 0)
+                     # there should be no outpoint because the tx is only in
+                     # the mempool and /checkmempool was not used
 
-        json_request = '/checkmempool/'+txid+'-'+str(n)
-        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+        json_request = '/checkmempool/' + txid + '-' + str(n)
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
-        assert_equal(len(json_obj['utxos']), 1) #there should be a outpoint because it has just added to the mempool
+        assert_equal(len(json_obj['utxos']), 1)
+                     # there should be an outpoint because it has just been
+                     # added to the mempool
 
-        #do some invalid requests
+        # do some invalid requests
         json_request = '{"checkmempool'
-        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
-        assert_equal(response.status, 400) #must be a 400 because we send a invalid json request
+        response = http_post_call(
+            url.hostname, url.port, '/rest/getutxos' + self.FORMAT_SEPARATOR + 'json', json_request, True)
+        assert_equal(response.status, 400)
+                     # must be a 400 because we sent an invalid json request
 
         json_request = '{"checkmempool'
-        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
-        assert_equal(response.status, 400) #must be a 400 because we send a invalid bin request
+        response = http_post_call(
+            url.hostname, url.port, '/rest/getutxos' + self.FORMAT_SEPARATOR + 'bin', json_request, True)
+        assert_equal(response.status, 400)
+                     # must be a 400 because we sent an invalid bin request
 
-        response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
-        assert_equal(response.status, 400) #must be a 400 because we send a invalid bin request
+        response = http_post_call(
+            url.hostname, url.port, '/rest/getutxos/checkmempool' + self.FORMAT_SEPARATOR + 'bin', '', True)
+        assert_equal(response.status, 400)
+                     # must be a 400 because we sent an invalid bin request
 
-        #test limits
+        # test limits
         json_request = '/checkmempool/'
         for x in range(0, 20):
-            json_request += txid+'-'+str(n)+'/'
+            json_request += txid + '-' + str(n) + '/'
         json_request = json_request.rstrip("/")
-        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
-        assert_equal(response.status, 400) #must be a 400 because we exceeding the limits
+        response = http_post_call(
+            url.hostname, url.port, '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json', '', True)
+        assert_equal(response.status, 400)
+                     # must be a 400 because we are exceeding the limits
 
         json_request = '/checkmempool/'
         for x in range(0, 15):
-            json_request += txid+'-'+str(n)+'/'
+            json_request += txid + '-' + str(n) + '/'
         json_request = json_request.rstrip("/")
-        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
-        assert_equal(response.status, 200) #must be a 200 because we are within the limits
+        response = http_post_call(
+            url.hostname, url.port, '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json', '', True)
+        assert_equal(response.status, 200)
+                     # must be a 200 because we are within the limits
 
-        self.nodes[0].generate(1) #generate block to not affect upcoming tests
+        self.nodes[0].generate(
+            1)  # generate block to not affect upcoming tests
         self.sync_all()
 
-        ################
+        #
         # /rest/block/ #
-        ################
+        #
 
         # check binary format
-        response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
+        response = http_get_call(
+            url.hostname, url.port, '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + "bin", True)
         assert_equal(response.status, 200)
         assert_greater_than(int(response.getheader('content-length')), 80)
         response_str = response.read()
 
         # compare with block header
-        response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
+        response_header = http_get_call(
+            url.hostname, url.port, '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "bin", True)
         assert_equal(response_header.status, 200)
         assert_equal(int(response_header.getheader('content-length')), 80)
         response_header_str = response_header.read()
         assert_equal(response_str[0:80], response_header_str)
 
         # check block hex format
-        response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
+        response_hex = http_get_call(
+            url.hostname, url.port, '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + "hex", True)
         assert_equal(response_hex.status, 200)
         assert_greater_than(int(response_hex.getheader('content-length')), 160)
         response_hex_str = response_hex.read()
-        assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
+        assert_equal(
+            encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
 
         # compare with hex block header
-        response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
+        response_header_hex = http_get_call(
+            url.hostname, url.port, '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "hex", True)
         assert_equal(response_header_hex.status, 200)
-        assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
+        assert_greater_than(
+            int(response_header_hex.getheader('content-length')), 160)
         response_header_hex_str = response_header_hex.read()
         assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
-        assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
+        assert_equal(encode(response_header_str, "hex_codec")[
+                     0:160], response_header_hex_str[0:160])
 
         # check json format
-        block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
+        block_json_string = http_get_call(
+            url.hostname, url.port, '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + 'json')
         block_json_obj = json.loads(block_json_string)
         assert_equal(block_json_obj['hash'], bb_hash)
 
         # compare with json block header
-        response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
+        response_header_json = http_get_call(
+            url.hostname, url.port, '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "json", True)
         assert_equal(response_header_json.status, 200)
         response_header_json_str = response_header_json.read().decode('utf-8')
         json_obj = json.loads(response_header_json_str, parse_float=Decimal)
-        assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
-        assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
+        assert_equal(len(json_obj), 1)
+                     # ensure that there is one header in the json response
+        assert_equal(json_obj[0]['hash'], bb_hash)
+                     # request/response hash should be the same
 
-        #compare with normal RPC block response
+        # compare with normal RPC block response
         rpc_block_json = self.nodes[0].getblock(bb_hash)
         assert_equal(json_obj[0]['hash'],               rpc_block_json['hash'])
-        assert_equal(json_obj[0]['confirmations'],      rpc_block_json['confirmations'])
-        assert_equal(json_obj[0]['height'],             rpc_block_json['height'])
-        assert_equal(json_obj[0]['version'],            rpc_block_json['version'])
-        assert_equal(json_obj[0]['merkleroot'],         rpc_block_json['merkleroot'])
+        assert_equal(
+            json_obj[0]['confirmations'],      rpc_block_json['confirmations'])
+        assert_equal(
+            json_obj[0]['height'],             rpc_block_json['height'])
+        assert_equal(
+            json_obj[0]['version'],            rpc_block_json['version'])
+        assert_equal(
+            json_obj[0]['merkleroot'],         rpc_block_json['merkleroot'])
         assert_equal(json_obj[0]['time'],               rpc_block_json['time'])
-        assert_equal(json_obj[0]['nonce'],              rpc_block_json['nonce'])
+        assert_equal(
+            json_obj[0]['nonce'],              rpc_block_json['nonce'])
         assert_equal(json_obj[0]['bits'],               rpc_block_json['bits'])
-        assert_equal(json_obj[0]['difficulty'],         rpc_block_json['difficulty'])
-        assert_equal(json_obj[0]['chainwork'],          rpc_block_json['chainwork'])
-        assert_equal(json_obj[0]['previousblockhash'],  rpc_block_json['previousblockhash'])
-
-        #see if we can get 5 headers in one response
+        assert_equal(
+            json_obj[0]['difficulty'],         rpc_block_json['difficulty'])
+        assert_equal(
+            json_obj[0]['chainwork'],          rpc_block_json['chainwork'])
+        assert_equal(
+            json_obj[0]['previousblockhash'],  rpc_block_json['previousblockhash'])
+
+        # see if we can get 5 headers in one response
         self.nodes[1].generate(5)
         self.sync_all()
-        response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
+        response_header_json = http_get_call(
+            url.hostname, url.port, '/rest/headers/5/' + bb_hash + self.FORMAT_SEPARATOR + "json", True)
         assert_equal(response_header_json.status, 200)
         response_header_json_str = response_header_json.read().decode('utf-8')
         json_obj = json.loads(response_header_json_str)
-        assert_equal(len(json_obj), 5) #now we should have 5 header objects
+        assert_equal(len(json_obj), 5)  # now we should have 5 header objects
 
         # do tx test
         tx_hash = block_json_obj['tx'][0]['txid']
-        json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/tx/' + tx_hash + self.FORMAT_SEPARATOR + "json")
         json_obj = json.loads(json_string)
         assert_equal(json_obj['txid'], tx_hash)
 
         # check hex format response
-        hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
+        hex_string = http_get_call(
+            url.hostname, url.port, '/rest/tx/' + tx_hash + self.FORMAT_SEPARATOR + "hex", True)
         assert_equal(hex_string.status, 200)
         assert_greater_than(int(response.getheader('content-length')), 10)
 
-
         # check block tx details
         # let's make 3 tx and mine them on node 1
         txs = []
-        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
-        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
-        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
+        txs.append(self.nodes[0].sendtoaddress(
+            self.nodes[2].getnewaddress(), 11))
+        txs.append(self.nodes[0].sendtoaddress(
+            self.nodes[2].getnewaddress(), 11))
+        txs.append(self.nodes[0].sendtoaddress(
+            self.nodes[2].getnewaddress(), 11))
         self.sync_all()
 
-        # check that there are exactly 3 transactions in the TX memory pool before generating the block
-        json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
+        # check that there are exactly 3 transactions in the TX memory pool
+        # before generating the block
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/mempool/info' + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
         assert_equal(json_obj['size'], 3)
         # the size of the memory pool should be greater than 3x ~100 bytes
         assert_greater_than(json_obj['bytes'], 300)
 
         # check that there are our submitted transactions in the TX memory pool
-        json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/mempool/contents' + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
         for tx in txs:
             assert_equal(tx in json_obj, True)
@@ -311,25 +369,28 @@
         newblockhash = self.nodes[1].generate(1)
         self.sync_all()
 
-        #check if the 3 tx show up in the new block
-        json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
+        # check if the 3 tx show up in the new block
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/block/' + newblockhash[0] + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
         for tx in json_obj['tx']:
-            if not 'coinbase' in tx['vin'][0]: #exclude coinbase
+            if not 'coinbase' in tx['vin'][0]:  # exclude coinbase
                 assert_equal(tx['txid'] in txs, True)
 
-        #check the same but without tx details
-        json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
+        # check the same but without tx details
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/block/notxdetails/' + newblockhash[0] + self.FORMAT_SEPARATOR + 'json')
         json_obj = json.loads(json_string)
         for tx in txs:
             assert_equal(tx in json_obj['tx'], True)
 
-        #test rest bestblock
+        # test rest bestblock
         bb_hash = self.nodes[0].getbestblockhash()
 
-        json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
+        json_string = http_get_call(
+            url.hostname, url.port, '/rest/chaininfo.json')
         json_obj = json.loads(json_string)
         assert_equal(json_obj['bestblockhash'], bb_hash)
 
 if __name__ == '__main__':
-    RESTTest ().main ()
+    RESTTest().main()
diff --git a/qa/rpc-tests/rpcbind_test.py b/qa/rpc-tests/rpcbind_test.py
--- a/qa/rpc-tests/rpcbind_test.py
+++ b/qa/rpc-tests/rpcbind_test.py
@@ -33,8 +33,9 @@
         base_args = ['-disablewallet', '-nolisten']
         if allow_ips:
             base_args += ['-rpcallowip=' + x for x in allow_ips]
-        binds = ['-rpcbind='+addr for addr in addresses]
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
+        binds = ['-rpcbind=' + addr for addr in addresses]
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
         pid = bitcoind_processes[0].pid
         assert_equal(set(get_bind_addrs(pid)), set(expected))
         stop_nodes(self.nodes)
@@ -44,51 +45,60 @@
         Start a node with rpcallow IP, and request getnetworkinfo
         at a non-localhost IP.
         '''
-        base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
+        base_args = ['-disablewallet', '-nolisten'] + [
+            '-rpcallowip=' + x for x in allow_ips]
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, [base_args])
         # connect to node through non-loopback interface
         node = get_rpc_proxy(rpc_url(0, "%s:%d" % (rpchost, rpcport)), 0)
         node.getnetworkinfo()
         stop_nodes(self.nodes)
 
     def run_test(self):
-        # due to OS-specific network stats queries, this test works only on Linux
+        # due to OS-specific network stats queries, this test works only on
+        # Linux
         assert(sys.platform.startswith('linux'))
         # find the first non-loopback interface for testing
         non_loopback_ip = None
-        for name,ip in all_interfaces():
+        for name, ip in all_interfaces():
             if ip != '127.0.0.1':
                 non_loopback_ip = ip
                 break
         if non_loopback_ip is None:
-            assert(not 'This test requires at least one non-loopback IPv4 interface')
+            assert(
+                not 'This test requires at least one non-loopback IPv4 interface')
         print("Using interface %s for testing" % non_loopback_ip)
 
         defaultport = rpc_port(0)
 
         # check default without rpcallowip (IPv4 and IPv6 localhost)
         self.run_bind_test(None, '127.0.0.1', [],
-            [('127.0.0.1', defaultport), ('::1', defaultport)])
+                           [('127.0.0.1', defaultport), ('::1', defaultport)])
         # check default with rpcallowip (IPv6 any)
         self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
-            [('::0', defaultport)])
+                           [('::0', defaultport)])
         # check only IPv4 localhost (explicit)
         self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
-            [('127.0.0.1', defaultport)])
+                           [('127.0.0.1', defaultport)])
         # check only IPv4 localhost (explicit) with alternative port
-        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
+        self.run_bind_test(
+            ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
             [('127.0.0.1', 32171)])
-        # check only IPv4 localhost (explicit) with multiple alternative ports on same host
-        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
+        # check only IPv4 localhost (explicit) with multiple alternative ports
+        # on same host
+        self.run_bind_test(
+            ['127.0.0.1'], '127.0.0.1:32171', [
+                '127.0.0.1:32171', '127.0.0.1:32172'],
             [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
         # check only IPv6 localhost (explicit)
         self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
-            [('::1', defaultport)])
+                           [('::1', defaultport)])
         # check both IPv4 and IPv6 localhost (explicit)
         self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
-            [('127.0.0.1', defaultport), ('::1', defaultport)])
+                           [('127.0.0.1', defaultport), ('::1', defaultport)])
         # check only non-loopback interface
-        self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
+        self.run_bind_test(
+            [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
             [(non_loopback_ip, defaultport)])
 
         # Check that with invalid rpcallowip, we are denied
diff --git a/qa/rpc-tests/rpcnamedargs.py b/qa/rpc-tests/rpcnamedargs.py
--- a/qa/rpc-tests/rpcnamedargs.py
+++ b/qa/rpc-tests/rpcnamedargs.py
@@ -18,6 +18,7 @@
 
 
 class NamedArgumentTest(BitcoinTestFramework):
+
     """
     Test named arguments on RPC calls.
     """
@@ -37,16 +38,18 @@
         h = node.help(command='getinfo')
         assert(h.startswith('getinfo\n'))
 
-        assert_raises_jsonrpc(-8, 'Unknown named parameter', node.help, random='getinfo')
+        assert_raises_jsonrpc(
+            -8, 'Unknown named parameter', node.help, random='getinfo')
 
         h = node.getblockhash(height=0)
         node.getblock(blockhash=h)
 
         assert_equal(node.echo(), [])
-        assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
+        assert_equal(node.echo(arg0=0, arg9=9), [0] + [None] * 8 + [9])
         assert_equal(node.echo(arg1=1), [None, 1])
-        assert_equal(node.echo(arg9=None), [None]*10)
-        assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
+        assert_equal(node.echo(arg9=None), [None] * 10)
+        assert_equal(node.echo(arg0=0, arg3=3, arg9=9),
+                     [0] + [None] * 2 + [3] + [None] * 5 + [9])
 
 if __name__ == '__main__':
     NamedArgumentTest().main()
diff --git a/qa/rpc-tests/sendheaders.py b/qa/rpc-tests/sendheaders.py
--- a/qa/rpc-tests/sendheaders.py
+++ b/qa/rpc-tests/sendheaders.py
@@ -11,7 +11,7 @@
 '''
 SendHeadersTest -- test behavior of headers messages to announce blocks.
 
-Setup: 
+Setup:
 
 - Two nodes, two p2p connections to node0. One p2p connection should only ever
   receive inv's (omitted from testing description below, this is our control).
@@ -82,7 +82,9 @@
 
 direct_fetch_response_time = 0.05
 
+
 class BaseNode(SingleNodeConnCB):
+
     def __init__(self):
         SingleNodeConnCB.__init__(self)
         self.last_inv = None
@@ -164,7 +166,7 @@
             hash_headers = []
             if self.last_headers != None:
                 # treat headers as a list of block hashes
-                hash_headers = [ x.sha256 for x in self.last_headers.headers ]
+                hash_headers = [x.sha256 for x in self.last_headers.headers]
             if hash_headers != expect_headers:
                 success = False
 
@@ -187,7 +189,8 @@
         if hash_list == []:
             return
 
-        test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
+        test_function = lambda: self.last_getdata != None and [
+            x.hash for x in self.last_getdata.inv] == hash_list
         assert(wait_until(test_function, timeout=timeout))
         return
 
@@ -203,7 +206,7 @@
 
     def send_header_for_blocks(self, new_blocks):
         headers_message = msg_headers()
-        headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
+        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
         self.send_message(headers_message)
 
     def send_getblocks(self, locator):
@@ -213,16 +216,24 @@
 
 # InvNode: This peer should only ever receive inv's, because it doesn't ever send a
 # "sendheaders" message.
+
+
 class InvNode(BaseNode):
+
     def __init__(self):
         BaseNode.__init__(self)
 
 # TestNode: This peer is the one we use for most of the testing.
+
+
 class TestNode(BaseNode):
+
     def __init__(self):
         BaseNode.__init__(self)
 
+
 class SendHeadersTest(BitcoinTestFramework):
+
     def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
@@ -230,13 +241,14 @@
 
     def setup_network(self):
         self.nodes = []
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]] * 2)
         connect_nodes(self.nodes[0], 1)
 
     # mine count blocks and return the new tip
     def mine_blocks(self, count):
         # Clear out last block announcement from each p2p listener
-        [ x.clear_last_announcement() for x in self.p2p_connections ]
+        [x.clear_last_announcement() for x in self.p2p_connections]
         self.nodes[0].generate(count)
         return int(self.nodes[0].getbestblockhash(), 16)
 
@@ -246,16 +258,20 @@
     # to-be-reorged-out blocks are mined, so that we don't break later tests.
     # return the list of block hashes newly mined
     def mine_reorg(self, length):
-        self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
+        self.nodes[0].generate(
+            length)  # make sure all invalidated blocks are node0's
         sync_blocks(self.nodes, wait=0.1)
         for x in self.p2p_connections:
-            x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
+            x.wait_for_block_announcement(
+                int(self.nodes[0].getbestblockhash(), 16))
             x.clear_last_announcement()
 
         tip_height = self.nodes[1].getblockcount()
-        hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
+        hash_to_invalidate = self.nodes[
+            1].getblockhash(tip_height - (length - 1))
         self.nodes[1].invalidateblock(hash_to_invalidate)
-        all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
+        all_hashes = self.nodes[1].generate(
+            length + 1)  # Must be longer than the orig chain
         sync_blocks(self.nodes, wait=0.1)
         return [int(x, 16) for x in all_hashes]
 
@@ -267,14 +283,16 @@
         self.p2p_connections = [inv_node, test_node]
 
         connections = []
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
         # Set nServices to 0 for test_node, so no block download will occur outside of
         # direct fetching
-        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
+        connections.append(
+            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
         inv_node.add_connection(connections[0])
         test_node.add_connection(connections[1])
 
-        NetworkThread().start() # Start up network handling in another thread
+        NetworkThread().start()  # Start up network handling in another thread
 
         # Test logic begins here
         inv_node.wait_for_verack()
@@ -290,7 +308,8 @@
             tip = self.mine_blocks(1)
             assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
             assert_equal(test_node.check_last_announcement(inv=[tip]), True)
-            # Try a few different responses; none should affect next announcement
+            # Try a few different responses; none should affect next
+            # announcement
             if i == 0:
                 # first request the block
                 test_node.get_data([tip])
@@ -300,23 +319,27 @@
                 test_node.get_headers(locator=[old_tip], hashstop=tip)
                 test_node.get_data([tip])
                 test_node.wait_for_block(tip)
-                test_node.clear_last_announcement() # since we requested headers...
+                test_node.clear_last_announcement(
+                )  # since we requested headers...
             elif i == 2:
                 # this time announce own block via headers
                 height = self.nodes[0].getblockcount()
-                last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
+                last_time = self.nodes[0].getblock(
+                    self.nodes[0].getbestblockhash())['time']
                 block_time = last_time + 1
-                new_block = create_block(tip, create_coinbase(height+1), block_time)
+                new_block = create_block(
+                    tip, create_coinbase(height + 1), block_time)
                 new_block.solve()
                 test_node.send_header_for_blocks([new_block])
                 test_node.wait_for_getdata([new_block.sha256], timeout=5)
                 test_node.send_message(msg_block(new_block))
-                test_node.sync_with_ping() # make sure this block is processed
+                test_node.sync_with_ping()  # make sure this block is processed
                 inv_node.clear_last_announcement()
                 test_node.clear_last_announcement()
 
         print("Part 1: success!")
-        print("Part 2: announce blocks with headers after sendheaders message...")
+        print(
+            "Part 2: announce blocks with headers after sendheaders message...")
         # PART 2
         # 2. Send a sendheaders message and test that headers announcements
         # commence and keep working.
@@ -330,7 +353,7 @@
         assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
         assert_equal(test_node.check_last_announcement(headers=[tip]), True)
 
-        height = self.nodes[0].getblockcount()+1
+        height = self.nodes[0].getblockcount() + 1
         block_time += 10  # Advance far enough ahead
         for i in range(10):
             # Mine i blocks, and alternate announcing either via
@@ -339,8 +362,9 @@
             # with block header, even though the blocks are never requested
             for j in range(2):
                 blocks = []
-                for b in range(i+1):
-                    blocks.append(create_block(tip, create_coinbase(height), block_time))
+                for b in range(i + 1):
+                    blocks.append(
+                        create_block(tip, create_coinbase(height), block_time))
                     blocks[-1].solve()
                     tip = blocks[-1].sha256
                     block_time += 1
@@ -353,18 +377,20 @@
                     test_node.send_header_for_blocks(blocks)
                     # Test that duplicate inv's won't result in duplicate
                     # getdata requests, or duplicate headers announcements
-                    [ inv_node.send_block_inv(x.sha256) for x in blocks ]
-                    test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
+                    [inv_node.send_block_inv(x.sha256) for x in blocks]
+                    test_node.wait_for_getdata(
+                        [x.sha256 for x in blocks], timeout=5)
                     inv_node.sync_with_ping()
                 else:
                     # Announce via headers
                     test_node.send_header_for_blocks(blocks)
-                    test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
+                    test_node.wait_for_getdata(
+                        [x.sha256 for x in blocks], timeout=5)
                     # Test that duplicate headers won't result in duplicate
                     # getdata requests (the check is further down)
                     inv_node.send_header_for_blocks(blocks)
                     inv_node.sync_with_ping()
-                [ test_node.send_message(msg_block(x)) for x in blocks ]
+                [test_node.send_message(msg_block(x)) for x in blocks]
                 test_node.sync_with_ping()
                 inv_node.sync_with_ping()
                 # This block should not be announced to the inv node (since it also
@@ -373,26 +399,31 @@
                 assert_equal(inv_node.last_headers, None)
                 tip = self.mine_blocks(1)
                 assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
-                assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+                assert_equal(
+                    test_node.check_last_announcement(headers=[tip]), True)
                 height += 1
                 block_time += 1
 
         print("Part 2: success!")
 
-        print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
+        print(
+            "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
 
         # PART 3.  Headers announcements can stop after large reorg, and resume after
         # getheaders or inv from peer.
         for j in range(2):
-            # First try mining a reorg that can propagate with header announcement
+            # First try mining a reorg that can propagate with header
+            # announcement
             new_block_hashes = self.mine_reorg(length=7)
             tip = new_block_hashes[-1]
             assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
-            assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
+            assert_equal(
+                test_node.check_last_announcement(headers=new_block_hashes), True)
 
-            block_time += 8 
+            block_time += 8
 
-            # Mine a too-large reorg, which should be announced with a single inv
+            # Mine a too-large reorg, which should be announced with a single
+            # inv
             new_block_hashes = self.mine_reorg(length=8)
             tip = new_block_hashes[-1]
             assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
@@ -400,12 +431,14 @@
 
             block_time += 9
 
-            fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
+            fork_point = self.nodes[0].getblock(
+                "%02x" % new_block_hashes[0])["previousblockhash"]
             fork_point = int(fork_point, 16)
 
             # Use getblocks/getdata
-            test_node.send_getblocks(locator = [fork_point])
-            assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
+            test_node.send_getblocks(locator=[fork_point])
+            assert_equal(
+                test_node.check_last_announcement(inv=new_block_hashes), True)
             test_node.get_data(new_block_hashes)
             test_node.wait_for_block(new_block_hashes[-1])
 
@@ -413,22 +446,25 @@
                 # Mine another block, still should get only an inv
                 tip = self.mine_blocks(1)
                 assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
-                assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+                assert_equal(
+                    test_node.check_last_announcement(inv=[tip]), True)
                 if i == 0:
-                    # Just get the data -- shouldn't cause headers announcements to resume
+                    # Just get the data -- shouldn't cause headers
+                    # announcements to resume
                     test_node.get_data([tip])
                     test_node.wait_for_block(tip)
                 elif i == 1:
                     # Send a getheaders message that shouldn't trigger headers announcements
                     # to resume (best header sent will be too old)
-                    test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
+                    test_node.get_headers(locator=[
+                                          fork_point], hashstop=new_block_hashes[1])
                     test_node.get_data([tip])
                     test_node.wait_for_block(tip)
                 elif i == 2:
                     test_node.get_data([tip])
                     test_node.wait_for_block(tip)
                     # This time, try sending either a getheaders to trigger resumption
-                    # of headers announcements, or mine a new block and inv it, also 
+                    # of headers announcements, or mine a new block and inv it, also
                     # triggering resumption of headers announcements.
                     if j == 0:
                         test_node.get_headers(locator=[tip], hashstop=0)
@@ -439,27 +475,30 @@
             # New blocks should now be announced with header
             tip = self.mine_blocks(1)
             assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
-            assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+            assert_equal(
+                test_node.check_last_announcement(headers=[tip]), True)
 
         print("Part 3: success!")
 
         print("Part 4: Testing direct fetch behavior...")
         tip = self.mine_blocks(1)
         height = self.nodes[0].getblockcount() + 1
-        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
+        last_time = self.nodes[0].getblock(
+            self.nodes[0].getbestblockhash())['time']
         block_time = last_time + 1
 
         # Create 2 blocks.  Send the blocks, then send the headers.
         blocks = []
         for b in range(2):
-            blocks.append(create_block(tip, create_coinbase(height), block_time))
+            blocks.append(
+                create_block(tip, create_coinbase(height), block_time))
             blocks[-1].solve()
             tip = blocks[-1].sha256
             block_time += 1
             height += 1
             inv_node.send_message(msg_block(blocks[-1]))
 
-        inv_node.sync_with_ping() # Make sure blocks are processed
+        inv_node.sync_with_ping()  # Make sure blocks are processed
         test_node.last_getdata = None
         test_node.send_header_for_blocks(blocks)
         test_node.sync_with_ping()
@@ -470,7 +509,8 @@
         # This time, direct fetch should work
         blocks = []
         for b in range(3):
-            blocks.append(create_block(tip, create_coinbase(height), block_time))
+            blocks.append(
+                create_block(tip, create_coinbase(height), block_time))
             blocks[-1].solve()
             tip = blocks[-1].sha256
             block_time += 1
@@ -478,9 +518,10 @@
 
         test_node.send_header_for_blocks(blocks)
         test_node.sync_with_ping()
-        test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
+        test_node.wait_for_getdata(
+            [x.sha256 for x in blocks], timeout=direct_fetch_response_time)
 
-        [ test_node.send_message(msg_block(x)) for x in blocks ]
+        [test_node.send_message(msg_block(x)) for x in blocks]
 
         test_node.sync_with_ping()
 
@@ -491,7 +532,8 @@
 
         # Create extra blocks for later
         for b in range(20):
-            blocks.append(create_block(tip, create_coinbase(height), block_time))
+            blocks.append(
+                create_block(tip, create_coinbase(height), block_time))
             blocks[-1].solve()
             tip = blocks[-1].sha256
             block_time += 1
@@ -509,13 +551,15 @@
         # both blocks (same work as tip)
         test_node.send_header_for_blocks(blocks[1:2])
         test_node.sync_with_ping()
-        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
+        test_node.wait_for_getdata(
+            [x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
 
         # Announcing 16 more headers should trigger direct fetch for 14 more
         # blocks
         test_node.send_header_for_blocks(blocks[2:18])
         test_node.sync_with_ping()
-        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
+        test_node.wait_for_getdata(
+            [x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
 
         # Announcing 1 more header should not trigger any response
         test_node.last_getdata = None
@@ -527,7 +571,7 @@
         print("Part 4: success!")
 
         # Now deliver all those blocks we announced.
-        [ test_node.send_message(msg_block(x)) for x in blocks ]
+        [test_node.send_message(msg_block(x)) for x in blocks]
 
         print("Part 5: Testing handling of unconnecting headers")
         # First we test that receipt of an unconnecting header doesn't prevent
@@ -537,7 +581,8 @@
             blocks = []
             # Create two more blocks.
             for j in range(2):
-                blocks.append(create_block(tip, create_coinbase(height), block_time))
+                blocks.append(
+                    create_block(tip, create_coinbase(height), block_time))
                 blocks[-1].solve()
                 tip = blocks[-1].sha256
                 block_time += 1
@@ -549,23 +594,26 @@
             test_node.wait_for_getheaders(timeout=1)
             test_node.send_header_for_blocks(blocks)
             test_node.wait_for_getdata([x.sha256 for x in blocks])
-            [ test_node.send_message(msg_block(x)) for x in blocks ]
+            [test_node.send_message(msg_block(x)) for x in blocks]
             test_node.sync_with_ping()
-            assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
+            assert_equal(
+                int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
 
         blocks = []
         # Now we test that if we repeatedly don't send connecting headers, we
         # don't go into an infinite loop trying to get them to connect.
         MAX_UNCONNECTING_HEADERS = 10
-        for j in range(MAX_UNCONNECTING_HEADERS+1):
-            blocks.append(create_block(tip, create_coinbase(height), block_time))
+        for j in range(MAX_UNCONNECTING_HEADERS + 1):
+            blocks.append(
+                create_block(tip, create_coinbase(height), block_time))
             blocks[-1].solve()
             tip = blocks[-1].sha256
             block_time += 1
             height += 1
 
         for i in range(1, MAX_UNCONNECTING_HEADERS):
-            # Send a header that doesn't connect, check that we get a getheaders.
+            # Send a header that doesn't connect, check that we get a
+            # getheaders.
             with mininode_lock:
                 test_node.last_getheaders = None
             test_node.send_header_for_blocks([blocks[i]])
@@ -579,11 +627,12 @@
 
         # Now try to see how many unconnecting headers we can send
         # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
-        for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
-            # Send a header that doesn't connect, check that we get a getheaders.
+        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
+            # Send a header that doesn't connect, check that we get a
+            # getheaders.
             with mininode_lock:
                 test_node.last_getheaders = None
-            test_node.send_header_for_blocks([blocks[i%len(blocks)]])
+            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
             test_node.wait_for_getheaders(timeout=1)
 
         # Eventually this stops working.
diff --git a/qa/rpc-tests/signmessages.py b/qa/rpc-tests/signmessages.py
--- a/qa/rpc-tests/signmessages.py
+++ b/qa/rpc-tests/signmessages.py
@@ -8,6 +8,7 @@
 
 
 class SignMessagesTest(BitcoinTestFramework):
+
     """Tests RPC commands for signing and verifying messages."""
 
     def __init__(self):
diff --git a/qa/rpc-tests/wallet-accounts.py b/qa/rpc-tests/wallet-accounts.py
--- a/qa/rpc-tests/wallet-accounts.py
+++ b/qa/rpc-tests/wallet-accounts.py
@@ -21,74 +21,75 @@
         self.node_args = [[]]
 
     def setup_network(self):
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, self.node_args)
         self.is_network_split = False
 
-    def run_test (self):
+    def run_test(self):
         node = self.nodes[0]
         # Check that there's no UTXO on any of the nodes
         assert_equal(len(node.listunspent()), 0)
-        
+
         node.generate(101)
-        
+
         assert_equal(node.getbalance(), 50)
-        
-        accounts = ["a","b","c","d","e"]
+
+        accounts = ["a", "b", "c", "d", "e"]
         amount_to_send = 1.0
         account_addresses = dict()
         for account in accounts:
             address = node.getaccountaddress(account)
             account_addresses[account] = address
-            
+
             node.getnewaddress(account)
             assert_equal(node.getaccount(address), account)
             assert(address in node.getaddressesbyaccount(account))
-            
+
             node.sendfrom("", address, amount_to_send)
-        
+
         node.generate(1)
-        
+
         for i in range(len(accounts)):
             from_account = accounts[i]
-            to_account = accounts[(i+1)%len(accounts)]
+            to_account = accounts[(i + 1) % len(accounts)]
             to_address = account_addresses[to_account]
             node.sendfrom(from_account, to_address, amount_to_send)
-        
+
         node.generate(1)
-        
+
         for account in accounts:
             address = node.getaccountaddress(account)
             assert(address != account_addresses[account])
             assert_equal(node.getreceivedbyaccount(account), 2)
             node.move(account, "", node.getbalance(account))
-        
+
         node.generate(101)
-        
+
         expected_account_balances = {"": 5200}
         for account in accounts:
             expected_account_balances[account] = 0
-        
+
         assert_equal(node.listaccounts(), expected_account_balances)
-        
+
         assert_equal(node.getbalance(""), 5200)
-        
+
         for account in accounts:
             address = node.getaccountaddress("")
             node.setaccount(address, account)
             assert(address in node.getaddressesbyaccount(account))
             assert(address not in node.getaddressesbyaccount(""))
-        
+
         for account in accounts:
             addresses = []
             for x in range(10):
                 addresses.append(node.getnewaddress())
             multisig_address = node.addmultisigaddress(5, addresses, account)
             node.sendfrom("", multisig_address, 50)
-        
+
         node.generate(101)
-        
+
         for account in accounts:
             assert_equal(node.getbalance(account), 50)
 
 if __name__ == '__main__':
-    WalletAccountsTest().main ()
+    WalletAccountsTest().main()
diff --git a/qa/rpc-tests/wallet-dump.py b/qa/rpc-tests/wallet-dump.py
--- a/qa/rpc-tests/wallet-dump.py
+++ b/qa/rpc-tests/wallet-dump.py
@@ -4,7 +4,8 @@
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (start_nodes, start_node, assert_equal, bitcoind_processes)
+from test_framework.util import (
+    start_nodes, start_node, assert_equal, bitcoind_processes)
 
 
 def read_dump(file_name, addrs, hd_master_addr_old):
@@ -65,17 +66,19 @@
         # longer than the default 30 seconds due to an expensive
         # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
         # the test often takes even longer.
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)
 
-    def run_test (self):
+    def run_test(self):
         tmpdir = self.options.tmpdir
 
         # generate 20 addresses to compare against the dump
         test_addr_count = 20
         addrs = []
-        for i in range(0,test_addr_count):
+        for i in range(0, test_addr_count):
             addr = self.nodes[0].getnewaddress()
-            vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
+            vaddr = self.nodes[0].validateaddress(
+                addr)  # required to get hd keypath
             addrs.append(vaddr)
         # Should be a no-op:
         self.nodes[0].keypoolrefill()
@@ -85,11 +88,13 @@
 
         found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
             read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
-        assert_equal(found_addr, test_addr_count)  # all keys must be in the dump
+        assert_equal(found_addr, test_addr_count)
+                     # all keys must be in the dump
         assert_equal(found_addr_chg, 50)  # 50 blocks where mined
-        assert_equal(found_addr_rsv, 90 + 1)  # keypool size (TODO: fix off-by-one)
+        assert_equal(found_addr_rsv, 90 + 1)
+                     # keypool size (TODO: fix off-by-one)
 
-        #encrypt wallet, restart, unlock and dump
+        # encrypt wallet, restart, unlock and dump
         self.nodes[0].encryptwallet('test')
         bitcoind_processes[0].wait()
         self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
@@ -99,10 +104,13 @@
         self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
 
         found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
-            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
+            read_dump(
+                tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
         assert_equal(found_addr, test_addr_count)
-        assert_equal(found_addr_chg, 90 + 1 + 50)  # old reserve keys are marked as change now
-        assert_equal(found_addr_rsv, 90 + 1)  # keypool size (TODO: fix off-by-one)
+        assert_equal(found_addr_chg, 90 + 1 + 50)
+                     # old reserve keys are marked as change now
+        assert_equal(found_addr_rsv, 90 + 1)
+                     # keypool size (TODO: fix off-by-one)
 
 if __name__ == '__main__':
-    WalletDumpTest().main ()
+    WalletDumpTest().main()
diff --git a/qa/rpc-tests/wallet-hd.py b/qa/rpc-tests/wallet-hd.py
--- a/qa/rpc-tests/wallet-hd.py
+++ b/qa/rpc-tests/wallet-hd.py
@@ -23,11 +23,12 @@
         self.node_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
 
     def setup_network(self):
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, self.node_args)
         self.is_network_split = False
         connect_nodes_bi(self.nodes, 0, 1)
 
-    def run_test (self):
+    def run_test(self):
         tmpdir = self.options.tmpdir
 
         # Make sure we use hd, keep masterkeyid
@@ -38,9 +39,9 @@
         non_hd_add = self.nodes[0].getnewaddress()
         self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
 
-        # This should be enough to keep the master key and the non-HD key 
+        # This should be enough to keep the master key and the non-HD key
         self.nodes[1].backupwallet(tmpdir + "/hd.bak")
-        #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
+        # self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
 
         # Derive some HD addresses and remember the last
         # Also send funds to each add
@@ -50,7 +51,7 @@
         for i in range(num_hd_adds):
             hd_add = self.nodes[1].getnewaddress()
             hd_info = self.nodes[1].validateaddress(hd_add)
-            assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
+            assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i + 1) + "'")
             assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
             self.nodes[0].sendtoaddress(hd_add, 1)
             self.nodes[0].generate(1)
@@ -63,25 +64,27 @@
         print("Restore backup ...")
         self.stop_node(1)
         os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
-        shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
+        shutil.copyfile(
+            tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
         self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
-        #connect_nodes_bi(self.nodes, 0, 1)
+        # connect_nodes_bi(self.nodes, 0, 1)
 
         # Assert that derivation is deterministic
         hd_add_2 = None
         for _ in range(num_hd_adds):
             hd_add_2 = self.nodes[1].getnewaddress()
             hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
-            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
+            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(_ + 1) + "'")
             assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
         assert_equal(hd_add, hd_add_2)
 
         # Needs rescan
         self.stop_node(1)
-        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
-        #connect_nodes_bi(self.nodes, 0, 1)
+        self.nodes[1] = start_node(
+            1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
+        # connect_nodes_bi(self.nodes, 0, 1)
         assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
 
 
 if __name__ == '__main__':
-    WalletHDTest().main ()
+    WalletHDTest().main()
diff --git a/qa/rpc-tests/walletbackup.py b/qa/rpc-tests/walletbackup.py
--- a/qa/rpc-tests/walletbackup.py
+++ b/qa/rpc-tests/walletbackup.py
@@ -37,7 +37,9 @@
 from test_framework.util import *
 from random import randint
 import logging
-logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
+logging.basicConfig(
+    format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
+
 
 class WalletBackupTest(BitcoinTestFramework):
 
@@ -46,21 +48,23 @@
         self.setup_clean_chain = True
         self.num_nodes = 4
         # nodes 1, 2,3 are spenders, let's give them a keypool=100
-        self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
+        self.extra_args = [
+            ["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
 
     # This mirrors how the network was setup in the bash test
     def setup_network(self, split=False):
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+        self.nodes = start_nodes(
+            self.num_nodes, self.options.tmpdir, self.extra_args)
         connect_nodes(self.nodes[0], 3)
         connect_nodes(self.nodes[1], 3)
         connect_nodes(self.nodes[2], 3)
         connect_nodes(self.nodes[2], 0)
-        self.is_network_split=False
+        self.is_network_split = False
         self.sync_all()
 
     def one_send(self, from_node, to_address):
-        if (randint(1,2) == 1):
-            amount = Decimal(randint(1,10)) / Decimal(10)
+        if (randint(1, 2) == 1):
+            amount = Decimal(randint(1, 10)) / Decimal(10)
             self.nodes[from_node].sendtoaddress(to_address, amount)
 
     def do_one_round(self):
@@ -149,9 +153,9 @@
         # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
         assert_equal(total, 5700)
 
-        ##
+        #
         # Test restoring spender wallets from backups
-        ##
+        #
         logging.info("Restoring using wallet.dat")
         self.stop_three()
         self.erase_three()
@@ -161,9 +165,12 @@
         shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
 
         # Restore wallets from backup
-        shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
-        shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
-        shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
+        shutil.copyfile(
+            tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
+        shutil.copyfile(
+            tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
+        shutil.copyfile(
+            tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
 
         logging.info("Re-starting nodes")
         self.start_three()
@@ -177,7 +184,7 @@
         self.stop_three()
         self.erase_three()
 
-        #start node2 with no chain
+        # start node2 with no chain
         shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
         shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
 
diff --git a/qa/rpc-tests/zapwallettxes.py b/qa/rpc-tests/zapwallettxes.py
--- a/qa/rpc-tests/zapwallettxes.py
+++ b/qa/rpc-tests/zapwallettxes.py
@@ -16,62 +16,66 @@
 
     def setup_network(self, split=False):
         self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
-        connect_nodes_bi(self.nodes,0,1)
-        connect_nodes_bi(self.nodes,1,2)
-        connect_nodes_bi(self.nodes,0,2)
-        self.is_network_split=False
+        connect_nodes_bi(self.nodes, 0, 1)
+        connect_nodes_bi(self.nodes, 1, 2)
+        connect_nodes_bi(self.nodes, 0, 2)
+        self.is_network_split = False
         self.sync_all()
 
-    def run_test (self):
+    def run_test(self):
         print("Mining blocks...")
         self.nodes[0].generate(1)
         self.sync_all()
         self.nodes[1].generate(101)
         self.sync_all()
-        
+
         assert_equal(self.nodes[0].getbalance(), 50)
-        
+
         txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
         txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
         self.sync_all()
         self.nodes[0].generate(1)
         self.sync_all()
-        
+
         txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
         txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
-        
+
         tx0 = self.nodes[0].gettransaction(txid0)
-        assert_equal(tx0['txid'], txid0) #tx0 must be available (confirmed)
-        
+        assert_equal(tx0['txid'], txid0)  # tx0 must be available (confirmed)
+
         tx1 = self.nodes[0].gettransaction(txid1)
-        assert_equal(tx1['txid'], txid1) #tx1 must be available (confirmed)
-        
+        assert_equal(tx1['txid'], txid1)  # tx1 must be available (confirmed)
+
         tx2 = self.nodes[0].gettransaction(txid2)
-        assert_equal(tx2['txid'], txid2) #tx2 must be available (unconfirmed)
-        
+        assert_equal(tx2['txid'], txid2)  # tx2 must be available (unconfirmed)
+
         tx3 = self.nodes[0].gettransaction(txid3)
-        assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
-        
-        #restart bitcoind
+        assert_equal(tx3['txid'], txid3)  # tx3 must be available (unconfirmed)
+
+        # restart bitcoind
         self.nodes[0].stop()
         bitcoind_processes[0].wait()
-        self.nodes[0] = start_node(0,self.options.tmpdir)
-        
+        self.nodes[0] = start_node(0, self.options.tmpdir)
+
         tx3 = self.nodes[0].gettransaction(txid3)
-        assert_equal(tx3['txid'], txid3) #tx must be available (unconfirmed)
-        
+        assert_equal(tx3['txid'], txid3)  # tx must be available (unconfirmed)
+
         self.nodes[0].stop()
         bitcoind_processes[0].wait()
-        
-        #restart bitcoind with zapwallettxes
-        self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
-        
+
+        # restart bitcoind with zapwallettxes
+        self.nodes[0] = start_node(
+            0, self.options.tmpdir, ["-zapwallettxes=1"])
+
         assert_raises(JSONRPCException, self.nodes[0].gettransaction, [txid3])
-        #there must be a expection because the unconfirmed wallettx0 must be gone by now
+        # there must be an exception because the unconfirmed wallettx0 must
+        # be gone by now
 
         tx0 = self.nodes[0].gettransaction(txid0)
-        assert_equal(tx0['txid'], txid0) #tx0 (confirmed) must still be available because it was confirmed
+        assert_equal(tx0['txid'], txid0)
+        # tx0 (confirmed) must still be available because it was
+        # confirmed
 
 
 if __name__ == '__main__':
-    ZapWalletTXesTest ().main ()
+    ZapWalletTXesTest().main()
diff --git a/qa/rpc-tests/zmq_test.py b/qa/rpc-tests/zmq_test.py
--- a/qa/rpc-tests/zmq_test.py
+++ b/qa/rpc-tests/zmq_test.py
@@ -12,6 +12,7 @@
 import zmq
 import struct
 
+
 class ZMQTest (BitcoinTestFramework):
 
     def __init__(self):
@@ -27,11 +28,12 @@
         self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
         self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
         return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
-            ['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
+            ['-zmqpubhashtx=tcp://127.0.0.1:' +
+                str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:' + str(self.port)],
             [],
             [],
             []
-            ])
+        ])
 
     def run_test(self):
         self.sync_all()
@@ -46,16 +48,18 @@
         body = msg[1]
         nseq = msg[2]
         msgSequence = struct.unpack('<I', msg[-1])[-1]
-        assert_equal(msgSequence, 0) #must be sequence 0 on hashtx
+        assert_equal(msgSequence, 0)  # must be sequence 0 on hashtx
 
         msg = self.zmqSubSocket.recv_multipart()
         topic = msg[0]
         body = msg[1]
         msgSequence = struct.unpack('<I', msg[-1])[-1]
-        assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
+        assert_equal(msgSequence, 0)  # must be sequence 0 on hashblock
         blkhash = bytes_to_hex_str(body)
 
-        assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
+        assert_equal(genhashes[0], blkhash)
+        # blockhash from generate must be equal to the hash
+        # received over zmq
 
         n = 10
         genhashes = self.nodes[1].generate(n)
@@ -63,21 +67,24 @@
 
         zmqHashes = []
         blockcount = 0
-        for x in range(0,n*2):
+        for x in range(0, n * 2):
             msg = self.zmqSubSocket.recv_multipart()
             topic = msg[0]
             body = msg[1]
             if topic == b"hashblock":
                 zmqHashes.append(bytes_to_hex_str(body))
                 msgSequence = struct.unpack('<I', msg[-1])[-1]
-                assert_equal(msgSequence, blockcount+1)
+                assert_equal(msgSequence, blockcount + 1)
                 blockcount += 1
 
-        for x in range(0,n):
-            assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
+        for x in range(0, n):
+            assert_equal(genhashes[x], zmqHashes[x])
+            # blockhash from generate must be equal to the hash
+            # received over zmq
 
-        #test tx from a second node
-        hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
+        # test tx from a second node
+        hashRPC = self.nodes[1].sendtoaddress(
+            self.nodes[0].getnewaddress(), 1.0)
         self.sync_all()
 
         # now we should receive a zmq msg because the tx was broadcast
@@ -88,10 +95,12 @@
         if topic == b"hashtx":
             hashZMQ = bytes_to_hex_str(body)
             msgSequence = struct.unpack('<I', msg[-1])[-1]
-            assert_equal(msgSequence, blockcount+1)
+            assert_equal(msgSequence, blockcount + 1)
 
-        assert_equal(hashRPC, hashZMQ) #blockhash from generate must be equal to the hash received over zmq
+        assert_equal(hashRPC, hashZMQ)
+        # txid from sendtoaddress must be equal to the hash
+        # received over zmq
 
 
 if __name__ == '__main__':
-    ZMQTest ().main ()
+    ZMQTest().main()