diff --git a/.arclint b/.arclint --- a/.arclint +++ b/.arclint @@ -23,7 +23,7 @@ "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", - "(^test/functional/[a-emt].*\\.py$)" + "(^test/functional/[a-fmt].*\\.py$)" ], "flags": [ "--aggressive", @@ -35,7 +35,7 @@ "type": "black", "version": ">=23.0.0", "include": [ - "(^test/functional/[a-emt].*\\.py$)" + "(^test/functional/[a-fmt].*\\.py$)" ], "flags": [ "--preview" diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py --- a/test/functional/feature_abortnode.py +++ b/test/functional/feature_abortnode.py @@ -32,7 +32,7 @@ datadir = get_datadir_path(self.options.tmpdir, 0) # Deleting the undo file will result in reorg failure - os.unlink(os.path.join(datadir, self.chain, 'blocks', 'rev00000.dat')) + os.unlink(os.path.join(datadir, self.chain, "blocks", "rev00000.dat")) # Connecting to a node with a more work chain will trigger a reorg # attempt. @@ -48,5 +48,5 @@ self.nodes[0].assert_start_raises_init_error() -if __name__ == '__main__': +if __name__ == "__main__": AbortNodeTest().main() diff --git a/test/functional/feature_addrman.py b/test/functional/feature_addrman.py --- a/test/functional/feature_addrman.py +++ b/test/functional/feature_addrman.py @@ -51,17 +51,17 @@ self.num_nodes = 1 def run_test(self): - peers_dat = os.path.join( - self.nodes[0].datadir, - self.chain, - "peers.dat") - - def init_error(reason): return ( - f"Error: Invalid or corrupt peers.dat \\({reason}\\). If you believe this " - f"is a bug, please report it to {self.config['environment']['PACKAGE_BUGREPORT']}. " - f'As a workaround, you can move the file \\("{peers_dat}"\\) out of the way \\(rename, ' - "move, or delete\\) to have a new one created on the next start." - ) + peers_dat = os.path.join(self.nodes[0].datadir, self.chain, "peers.dat") + + def init_error(reason): + return ( + f"Error: Invalid or corrupt peers.dat \\({reason}\\). If you believe" + " this is a bug, please report it to" + f" {self.config['environment']['PACKAGE_BUGREPORT']}. As a workaround," + f' you can move the file \\("{peers_dat}"\\) out of the way' + " \\(rename, move, or delete\\) to have a new one created on the next" + " start." + ) self.log.info("Check that mocked addrman is valid") self.stop_node(0) @@ -71,7 +71,8 @@ assert_equal(self.nodes[0].getnodeaddresses(), []) self.log.info( - "Check that addrman with negative lowest_compatible cannot be read") + "Check that addrman with negative lowest_compatible cannot be read" + ) self.stop_node(0) write_addrman(peers_dat, lowest_compatible=-32) self.nodes[0].assert_start_raises_init_error( @@ -82,14 +83,18 @@ match=ErrorMatch.FULL_REGEX, ) - self.log.info( - "Check that addrman from future is overwritten with new addrman") + self.log.info("Check that addrman from future is overwritten with new addrman") self.stop_node(0) write_addrman(peers_dat, lowest_compatible=111) assert_equal(os.path.exists(f"{peers_dat}.bak"), False) - with self.nodes[0].assert_debug_log([ - f'Creating new peers.dat because the file version was not compatible ("{peers_dat}"). Original backed up to peers.dat.bak', - ]): + with self.nodes[0].assert_debug_log( + [ + ( + "Creating new peers.dat because the file version was not" + f' compatible ("{peers_dat}"). 
Original backed up to peers.dat.bak' + ), + ] + ): self.start_node(0) assert_equal(self.nodes[0].getnodeaddresses(), []) assert_equal(os.path.exists(f"{peers_dat}.bak"), True) @@ -124,7 +129,9 @@ write_addrman(peers_dat, len_tried=-1) self.nodes[0].assert_start_raises_init_error( expected_msg=init_error( - "Corrupt AddrMan serialization: nTried=-1, should be in \\[0, 16384\\]:.*"), + "Corrupt AddrMan serialization: nTried=-1, should be in \\[0," + " 16384\\]:.*" + ), match=ErrorMatch.FULL_REGEX, ) @@ -133,26 +140,29 @@ write_addrman(peers_dat, len_new=-1) self.nodes[0].assert_start_raises_init_error( expected_msg=init_error( - "Corrupt AddrMan serialization: nNew=-1, should be in \\[0, 65536\\]:.*"), + "Corrupt AddrMan serialization: nNew=-1, should be in \\[0, 65536\\]:.*" + ), match=ErrorMatch.FULL_REGEX, ) - self.log.info( - "Check that corrupt addrman cannot be read (failed check)") + self.log.info("Check that corrupt addrman cannot be read (failed check)") self.stop_node(0) write_addrman(peers_dat, bucket_key=0) self.nodes[0].assert_start_raises_init_error( expected_msg=init_error( - "Corrupt data. Consistency check failed with code -16: .*"), + "Corrupt data. Consistency check failed with code -16: .*" + ), match=ErrorMatch.FULL_REGEX, ) self.log.info("Check that missing addrman is recreated") self.stop_node(0) os.remove(peers_dat) - with self.nodes[0].assert_debug_log([ + with self.nodes[0].assert_debug_log( + [ f'Creating peers.dat because the file was not found ("{peers_dat}")', - ]): + ] + ): self.start_node(0) assert_equal(self.nodes[0].getnodeaddresses(), []) diff --git a/test/functional/feature_anchors.py b/test/functional/feature_anchors.py --- a/test/functional/feature_anchors.py +++ b/test/functional/feature_anchors.py @@ -28,7 +28,8 @@ assert not os.path.exists(node_anchors_path) self.log.info( - f"Add {BLOCK_RELAY_CONNECTIONS} block-relay-only connections to node") + f"Add {BLOCK_RELAY_CONNECTIONS} block-relay-only connections to node" + ) for i in range(BLOCK_RELAY_CONNECTIONS): self.log.debug(f"block-relay-only: {i}") self.nodes[0].add_outbound_p2p_connection( @@ -76,8 +77,7 @@ self.log.info("Start node") self.start_node(0) - self.log.info( - "When node starts, check if anchors.dat doesn't exist anymore") + self.log.info("When node starts, check if anchors.dat doesn't exist anymore") assert not os.path.exists(node_anchors_path) diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py --- a/test/functional/feature_asmap.py +++ b/test/functional/feature_asmap.py @@ -28,59 +28,64 @@ from test_framework.test_framework import BitcoinTestFramework -DEFAULT_ASMAP_FILENAME = 'ip_asn.map' # defined in src/init.cpp -ASMAP = '../../src/test/data/asmap.raw' # path to unit test skeleton asmap -VERSION = 'fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853' +DEFAULT_ASMAP_FILENAME = "ip_asn.map" # defined in src/init.cpp +ASMAP = "../../src/test/data/asmap.raw" # path to unit test skeleton asmap +VERSION = "fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853" def expected_messages(filename): - return [f'Opened asmap file "{filename}" (59 bytes) from disk', - f'Using asmap version {VERSION} for IP bucketing'] + return [ + f'Opened asmap file "{filename}" (59 bytes) from disk', + f"Using asmap version {VERSION} for IP bucketing", + ] class AsmapTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.extra_args = [[ - # Do addrman checks on all operations. 
- "-checkaddrman=1" - ]] + self.extra_args = [ + [ + # Do addrman checks on all operations. + "-checkaddrman=1" + ] + ] def fill_addrman(self, node_id): """Add 1 tried address to the addrman, followed by 1 new address.""" for addr, tried in [[0, True], [1, False]]: self.nodes[node_id].addpeeraddress( - address=f"101.{addr}.0.0", tried=tried, port=8333) + address=f"101.{addr}.0.0", tried=tried, port=8333 + ) def test_without_asmap_arg(self): - self.log.info('Test bitcoind with no -asmap arg passed') + self.log.info("Test bitcoind with no -asmap arg passed") self.stop_node(0) - with self.node.assert_debug_log(['Using /16 prefix for IP bucketing']): + with self.node.assert_debug_log(["Using /16 prefix for IP bucketing"]): self.start_node(0) def test_asmap_with_absolute_path(self): - self.log.info('Test bitcoind -asmap=') + self.log.info("Test bitcoind -asmap=") self.stop_node(0) - filename = os.path.join(self.datadir, 'my-map-file.map') + filename = os.path.join(self.datadir, "my-map-file.map") shutil.copyfile(self.asmap_raw, filename) with self.node.assert_debug_log(expected_messages(filename)): - self.start_node(0, [f'-asmap={filename}']) + self.start_node(0, [f"-asmap={filename}"]) os.remove(filename) def test_asmap_with_relative_path(self): - self.log.info('Test bitcoind -asmap=') + self.log.info("Test bitcoind -asmap=") self.stop_node(0) - name = 'ASN_map' + name = "ASN_map" filename = os.path.join(self.datadir, name) shutil.copyfile(self.asmap_raw, filename) with self.node.assert_debug_log(expected_messages(filename)): - self.start_node(0, [f'-asmap={name}']) + self.start_node(0, [f"-asmap={name}"]) os.remove(filename) def test_default_asmap(self): shutil.copyfile(self.asmap_raw, self.default_asmap) - for arg in ['-asmap', '-asmap=']: - self.log.info(f'Test bitcoind {arg} (using default map file)') + for arg in ["-asmap", "-asmap="]: + self.log.info(f"Test bitcoind {arg} (using default map file)") self.stop_node(0) with self.node.assert_debug_log(expected_messages(self.default_asmap)): self.start_node(0, [arg]) @@ -88,7 +93,8 @@ def test_asmap_interaction_with_addrman_containing_entries(self): self.log.info( - "Test bitcoind -asmap restart with addrman containing new and tried entries") + "Test bitcoind -asmap restart with addrman containing new and tried entries" + ) self.stop_node(0) shutil.copyfile(self.asmap_raw, self.default_asmap) self.start_node(0, ["-asmap", "-checkaddrman=1"]) @@ -105,20 +111,22 @@ os.remove(self.default_asmap) def test_default_asmap_with_missing_file(self): - self.log.info('Test bitcoind -asmap with missing default map file') + self.log.info("Test bitcoind -asmap with missing default map file") self.stop_node(0) msg = f'Error: Could not find asmap file "{self.default_asmap}"' self.node.assert_start_raises_init_error( - extra_args=['-asmap'], expected_msg=msg) + extra_args=["-asmap"], expected_msg=msg + ) def test_empty_asmap(self): - self.log.info('Test bitcoind -asmap with empty map file') + self.log.info("Test bitcoind -asmap with empty map file") self.stop_node(0) with open(self.default_asmap, "w", encoding="utf-8") as f: f.write("") msg = f'Error: Could not parse asmap file "{self.default_asmap}"' self.node.assert_start_raises_init_error( - extra_args=['-asmap'], expected_msg=msg) + extra_args=["-asmap"], expected_msg=msg + ) os.remove(self.default_asmap) def run_test(self): @@ -126,8 +134,8 @@ self.datadir = os.path.join(self.node.datadir, self.chain) self.default_asmap = os.path.join(self.datadir, DEFAULT_ASMAP_FILENAME) self.asmap_raw = os.path.join( - 
os.path.dirname( - os.path.realpath(__file__)), ASMAP) + os.path.dirname(os.path.realpath(__file__)), ASMAP + ) self.test_without_asmap_arg() self.test_asmap_with_absolute_path() @@ -138,5 +146,5 @@ self.test_empty_asmap() -if __name__ == '__main__': +if __name__ == "__main__": AsmapTest().main() diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -85,8 +85,9 @@ # Build the blockchain self.tip = int(self.nodes[0].getbestblockhash(), 16) - self.block_time = self.nodes[0].getblock( - self.nodes[0].getbestblockhash())['time'] + 1 + self.block_time = ( + self.nodes[0].getblock(self.nodes[0].getbestblockhash())["time"] + 1 + ) self.blocks = [] @@ -97,8 +98,9 @@ # Create the first block with a coinbase output to our key height = 1 - block = create_block(self.tip, create_coinbase( - height, coinbase_pubkey), self.block_time) + block = create_block( + self.tip, create_coinbase(height, coinbase_pubkey), self.block_time + ) self.blocks.append(block) self.block_time += 1 block.solve() @@ -109,8 +111,7 @@ # Bury the block 100 deep so the coinbase output is spendable for _ in range(100): - block = create_block( - self.tip, create_coinbase(height), self.block_time) + block = create_block(self.tip, create_coinbase(height), self.block_time) block.solve() self.blocks.append(block) self.tip = block.sha256 @@ -120,14 +121,12 @@ # Create a transaction spending the coinbase output with an invalid # (null) signature tx = CTransaction() - tx.vin.append( - CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) + tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) pad_tx(tx) tx.calc_sha256() - block102 = create_block( - self.tip, create_coinbase(height), self.block_time) + block102 = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block102.vtx.extend([tx]) block102.hashMerkleRoot = block102.calc_merkle_root() @@ -140,8 +139,7 @@ # Bury the assumed valid block 2100 deep for _ in range(2100): - block = create_block( - self.tip, create_coinbase(height), self.block_time) + block = create_block(self.tip, create_coinbase(height), self.block_time) block.nVersion = 4 block.solve() self.blocks.append(block) @@ -178,8 +176,9 @@ # Syncing 2200 blocks can take a while on slow systems. Give it plenty # of time to sync. p2p1.sync_with_ping(960) - assert_equal(self.nodes[1].getblock( - self.nodes[1].getbestblockhash())['height'], 2202) + assert_equal( + self.nodes[1].getblock(self.nodes[1].getbestblockhash())["height"], 2202 + ) # Send blocks to node2. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p2) @@ -187,5 +186,5 @@ assert_equal(self.nodes[2].getblockcount(), 101) -if __name__ == '__main__': +if __name__ == "__main__": AssumeValidTest().main() diff --git a/test/functional/feature_bind_extra.py b/test/functional/feature_bind_extra.py --- a/test/functional/feature_bind_extra.py +++ b/test/functional/feature_bind_extra.py @@ -25,7 +25,7 @@ def setup_network(self): # Due to OS-specific network stats queries, we only run on Linux. self.log.info("Checking for Linux") - if not sys.platform.startswith('linux'): + if not sys.platform.startswith("linux"): raise SkipTest("This test can only be run on Linux.") loopback_ipv4 = addr_to_hex("127.0.0.1") @@ -39,19 +39,15 @@ # Node0, no normal -bind=... with -bind=...=onion, thus only the tor # target. 
self.expected.append( - [ - [f"-bind=127.0.0.1:{port}=onion"], - [(loopback_ipv4, port)] - ], + [[f"-bind=127.0.0.1:{port}=onion"], [(loopback_ipv4, port)]], ) port += 1 # Node1, both -bind=... and -bind=...=onion. self.expected.append( [ - [f"-bind=127.0.0.1:{port}", - f"-bind=127.0.0.1:{port + 1}=onion"], - [(loopback_ipv4, port), (loopback_ipv4, port + 1)] + [f"-bind=127.0.0.1:{port}", f"-bind=127.0.0.1:{port + 1}=onion"], + [(loopback_ipv4, port), (loopback_ipv4, port + 1)], ], ) port += 2 @@ -72,11 +68,7 @@ # possible to bind on "::". This makes it unpredictable whether to expect # that bitcoind has bound on "::1" (for RPC) and "::" (for P2P). ipv6_addr_len_bytes = 32 - binds = set( - filter( - lambda e: len( - e[0]) != ipv6_addr_len_bytes, - binds)) + binds = set(filter(lambda e: len(e[0]) != ipv6_addr_len_bytes, binds)) # Remove RPC ports. They are not relevant for this test. binds = set(filter(lambda e: e[1] != rpc_port(i), binds)) assert_equal(binds, set(self.expected[i][1])) @@ -84,5 +76,5 @@ self.log.info(f"Stopped node {i}") -if __name__ == '__main__': +if __name__ == "__main__": BindExtraTest().main() diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -26,12 +26,12 @@ satoshi_round, ) -SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31) +SEQUENCE_LOCKTIME_DISABLE_FLAG = 1 << 31 # this means use time (0 means height) -SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22) +SEQUENCE_LOCKTIME_TYPE_FLAG = 1 << 22 # this is a bit-shift SEQUENCE_LOCKTIME_GRANULARITY = 9 -SEQUENCE_LOCKTIME_MASK = 0x0000ffff +SEQUENCE_LOCKTIME_MASK = 0x0000FFFF # RPC error for non-BIP68 final transactions NOT_FINAL_ERROR = "non-BIP68-final" @@ -48,7 +48,7 @@ [ "-acceptnonstdtxn=0", "-automaticunparking=1", - ] + ], ] def skip_test_if_missing_module(self): @@ -69,15 +69,18 @@ self.log.info("Running test sequence-lock-unconfirmed-inputs") self.test_sequence_lock_unconfirmed_inputs() - self.log.info( - "Running test BIP68 not consensus before versionbits activation") + self.log.info("Running test BIP68 not consensus before versionbits activation") self.test_bip68_not_consensus() self.log.info("Activating BIP68 (and 112/113)") self.activateCSV() print("Verifying nVersion=2 transactions are standard.") - print("Note that with current versions of bitcoin software, nVersion=2 transactions are always standard (independent of BIP68 activation status).") + print( + "Note that with current versions of bitcoin software, nVersion=2" + " transactions are always standard (independent of BIP68 activation" + " status)." + ) self.test_version2_relay() self.log.info("Passed") @@ -103,12 +106,14 @@ # input to mature. 
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [ - CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] - tx1.vout = [CTxOut(value, CScript([b'a']))] + CTxIn( + COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value + ) + ] + tx1.vout = [CTxOut(value, CScript([b"a"]))] pad_tx(tx1) - tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))[ - "hex"] + tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) @@ -116,14 +121,15 @@ # fail tx2 = CTransaction() tx2.nVersion = 2 - sequence_value = sequence_value & 0x7fffffff + sequence_value = sequence_value & 0x7FFFFFFF tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] - tx2.vout = [CTxOut(int(value - self.relayfee * XEC), CScript([b'a']))] + tx2.vout = [CTxOut(int(value - self.relayfee * XEC), CScript([b"a"]))] pad_tx(tx2) tx2.rehash() - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, - self.nodes[0].sendrawtransaction, ToHex(tx2)) + assert_raises_rpc_error( + -26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2) + ) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. @@ -135,7 +141,8 @@ # the current tip). def get_median_time_past(self, confirmations): block_hash = self.nodes[0].getblockhash( - self.nodes[0].getblockcount() - confirmations) + self.nodes[0].getblockcount() - confirmations + ) return self.nodes[0].getblockheader(block_hash)["mediantime"] # Test that sequence locks are respected for transactions spending @@ -149,6 +156,7 @@ addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: import random + random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} @@ -180,14 +188,14 @@ value = 0 for j in range(num_inputs): # this disables sequence locks - sequence_value = 0xfffffffe + sequence_value = 0xFFFFFFFE # 50% chance we enable sequence locks if random.randint(0, 1): using_sequence_locks = True # 10% of the time, make the input sequence value pass - input_will_pass = (random.randint(1, 10) == 1) + input_will_pass = random.randint(1, 10) == 1 sequence_value = utxos[j]["confirmations"] if not input_will_pass: sequence_value += 1 @@ -198,16 +206,16 @@ # from the tip so that we're looking up MTP of the block # PRIOR to the one the input appears in, as per the BIP68 # spec. 
- orig_time = self.get_median_time_past( - utxos[j]["confirmations"]) + orig_time = self.get_median_time_past(utxos[j]["confirmations"]) # MTP of the tip cur_time = self.get_median_time_past(0) # can only timelock this input if it's not too old -- # otherwise use height can_time_lock = True - if ((cur_time - orig_time) - >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: + if ( + (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY + ) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time @@ -218,26 +226,35 @@ time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ( - (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) - elif (not input_will_pass and time_delta <= cur_time - orig_time): + cur_time - orig_time + ) >> SEQUENCE_LOCKTIME_GRANULARITY + elif not input_will_pass and time_delta <= cur_time - orig_time: sequence_value = ( - (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1 + (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY + ) + 1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append( - CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) + CTxIn( + COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), + nSequence=sequence_value, + ) + ) value += utxos[j]["amount"] * XEC # Overestimate the size of the tx - signatures should be less than # 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append( - CTxOut(int(value - self.relayfee * tx_size * XEC / 1000), CScript([b'a']))) - rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))[ - "hex"] + CTxOut( + int(value - self.relayfee * tx_size * XEC / 1000), CScript([b"a"]) + ) + ) + rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"] - if (using_sequence_locks and not should_pass): + if using_sequence_locks and not should_pass: # This transaction should be rejected - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, - self.nodes[0].sendrawtransaction, rawtx) + assert_raises_rpc_error( + -26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx + ) else: # This raw transaction should be accepted self.nodes[0].sendrawtransaction(rawtx) @@ -252,8 +269,7 @@ cur_height = self.nodes[0].getblockcount() # Create a mempool tx. 
- txid = self.nodes[0].sendtoaddress( - self.nodes[0].getnewaddress(), 2000000) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2000000) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() @@ -269,10 +285,10 @@ tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] - tx2.vout = [ - CTxOut(int(0), CScript([b'a']))] - tx2.vout[0].nValue = tx1.vout[0].nValue - \ - fee_multiplier * self.nodes[0].calculate_fee(tx2) + tx2.vout = [CTxOut(int(0), CScript([b"a"]))] + tx2.vout[0].nValue = tx1.vout[0].nValue - fee_multiplier * self.nodes[ + 0 + ].calculate_fee(tx2) tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() @@ -288,17 +304,23 @@ tx = CTransaction() tx.nVersion = 2 - tx.vin = [ - CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] + tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [ - CTxOut(int(orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx)), CScript([b'a']))] + CTxOut( + int( + orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx) + ), + CScript([b"a"]), + ) + ] pad_tx(tx) tx.rehash() - if (orig_tx.hash in node.getrawmempool()): + if orig_tx.hash in node.getrawmempool(): # sendrawtransaction should fail if the tx is in the mempool - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, - node.sendrawtransaction, ToHex(tx)) + assert_raises_rpc_error( + -26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx) + ) else: # sendrawtransaction should succeed if the tx is not in the # mempool @@ -306,15 +328,14 @@ return tx - test_nonzero_locks( - tx2, self.nodes[0], use_height_lock=True) - test_nonzero_locks( - tx2, self.nodes[0], use_height_lock=False) + test_nonzero_locks(tx2, self.nodes[0], use_height_lock=True) + test_nonzero_locks(tx2, self.nodes[0], use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. 
# Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction( - txid=tx2.hash, fee_delta=-fee_multiplier * self.nodes[0].calculate_fee(tx2)) + txid=tx2.hash, fee_delta=-fee_multiplier * self.nodes[0].calculate_fee(tx2) + ) cur_time = int(time.time()) for _ in range(10): self.nodes[0].setmocktime(cur_time + 600) @@ -323,14 +344,13 @@ assert tx2.hash in self.nodes[0].getrawmempool() - test_nonzero_locks( - tx2, self.nodes[0], use_height_lock=True) - test_nonzero_locks( - tx2, self.nodes[0], use_height_lock=False) + test_nonzero_locks(tx2, self.nodes[0], use_height_lock=True) + test_nonzero_locks(tx2, self.nodes[0], use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction( - txid=tx2.hash, fee_delta=fee_multiplier * self.nodes[0].calculate_fee(tx2)) + txid=tx2.hash, fee_delta=fee_multiplier * self.nodes[0].calculate_fee(tx2) + ) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time + 600) @@ -341,31 +361,30 @@ # Now that tx2 is not in the mempool, a sequence locked spend should # succeed - tx3 = test_nonzero_locks( - tx2, self.nodes[0], use_height_lock=False) + tx3 = test_nonzero_locks(tx2, self.nodes[0], use_height_lock=False) assert tx3.hash in self.nodes[0].getrawmempool() self.generate(self.nodes[0], 1) assert tx3.hash not in self.nodes[0].getrawmempool() # One more test, this time using height locks - tx4 = test_nonzero_locks( - tx3, self.nodes[0], use_height_lock=True) + tx4 = test_nonzero_locks(tx3, self.nodes[0], use_height_lock=True) assert tx4.hash in self.nodes[0].getrawmempool() # Now try combining confirmed and unconfirmed inputs - tx5 = test_nonzero_locks( - tx4, self.nodes[0], use_height_lock=True) + tx5 = test_nonzero_locks(tx4, self.nodes[0], use_height_lock=True) assert tx5.hash not in self.nodes[0].getrawmempool() utxos = self.nodes[0].listunspent() tx5.vin.append( - CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) + CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1) + ) tx5.vout[0].nValue += int(utxos[0]["amount"] * XEC) raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"] - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, - self.nodes[0].sendrawtransaction, raw_tx5) + assert_raises_rpc_error( + -26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5 + ) # Test mempool-BIP68 consistency after reorg # @@ -390,12 +409,12 @@ block.solve() tip = block.sha256 assert_equal( - None if i == 1 else 'inconclusive', - self.nodes[0].submitblock( - ToHex(block))) + None if i == 1 else "inconclusive", + self.nodes[0].submitblock(ToHex(block)), + ) tmpl = self.nodes[0].getblocktemplate() - tmpl['previousblockhash'] = f"{tip:x}" - tmpl['transactions'] = [] + tmpl["previousblockhash"] = f"{tip:x}" + tmpl["transactions"] = [] cur_time += 1 mempool = self.nodes[0].getrawmempool() @@ -404,12 +423,11 @@ # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) - self.nodes[0].invalidateblock( - self.nodes[0].getblockhash(cur_height + 1)) + self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height + 1)) self.generate(self.nodes[0], 10, sync_fun=self.no_op) def get_csv_status(self): - height = self.nodes[0].getblockchaininfo()['blocks'] + height = self.nodes[0].getblockchaininfo()["blocks"] return height >= 576 # Make sure that BIP68 isn't being used to validate blocks, prior to @@ -418,8 +436,7 @@ # this test should be moved to run earlier, or deleted. 
def test_bip68_not_consensus(self): assert_equal(self.get_csv_status(), False) - txid = self.nodes[0].sendtoaddress( - self.nodes[0].getnewaddress(), 2000000) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2000000) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() @@ -429,7 +446,8 @@ tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ - CTxOut(int(tx1.vout[0].nValue - self.relayfee * XEC), CScript([b'a']))] + CTxOut(int(tx1.vout[0].nValue - self.relayfee * XEC), CScript([b"a"])) + ] # sign tx2 tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] @@ -447,17 +465,18 @@ tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [ - CTxOut(int(tx2.vout[0].nValue - self.relayfee * XEC), CScript([b'a']))] + CTxOut(int(tx2.vout[0].nValue - self.relayfee * XEC), CScript([b"a"])) + ] pad_tx(tx3) tx3.rehash() - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, - self.nodes[0].sendrawtransaction, ToHex(tx3)) + assert_raises_rpc_error( + -26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3) + ) # make a block that violates bip68; ensure that the tip updates block = create_block(tmpl=self.nodes[0].getblocktemplate()) - block.vtx.extend( - sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id())) + block.vtx.extend(sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id())) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() @@ -471,9 +490,8 @@ height = self.nodes[0].getblockcount() assert_greater_than(csv_activation_height - height, 1) self.generate( - self.nodes[0], - csv_activation_height - height - 1, - sync_fun=self.no_op) + self.nodes[0], csv_activation_height - height - 1, sync_fun=self.no_op + ) assert_equal(self.get_csv_status(), False) self.disconnect_nodes(0, 1) self.generate(self.nodes[0], 1, sync_fun=self.no_op) @@ -489,13 +507,12 @@ inputs = [] outputs = {self.nodes[1].getnewaddress(): 1000000.0} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] + rawtxfund = self.nodes[1].fundrawtransaction(rawtx)["hex"] tx = FromHex(CTransaction(), rawtxfund) tx.nVersion = 2 - tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))[ - "hex"] + tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))["hex"] self.nodes[1].sendrawtransaction(tx_signed) -if __name__ == '__main__': +if __name__ == "__main__": BIP68Test().main() diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -67,7 +67,7 @@ # Valid for block at height 120 -DUPLICATE_COINBASE_SCRIPT_SIG = b'\x01\x78' +DUPLICATE_COINBASE_SCRIPT_SIG = b"\x01\x78" class FullBlockTest(BitcoinTestFramework): @@ -75,8 +75,7 @@ self.num_nodes = 1 self.setup_clean_chain = True # This is a consensus block test, we don't care about tx policy - self.extra_args = [['-noparkdeepreorg', - '-acceptnonstdtxn=1']] + self.extra_args = [["-noparkdeepreorg", "-acceptnonstdtxn=1"]] def run_test(self): node = self.nodes[0] # convenience reference to the node @@ -94,11 +93,11 @@ self.spendable_outputs = [] # Create a new block - b_dup_cb = self.next_block('dup_cb') + b_dup_cb = self.next_block("dup_cb") b_dup_cb.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG b_dup_cb.vtx[0].rehash() duplicate_tx = b_dup_cb.vtx[0] - b_dup_cb = self.update_block('dup_cb', []) + b_dup_cb = self.update_block("dup_cb", []) 
self.send_blocks([b_dup_cb]) b0 = self.next_block(0) @@ -146,9 +145,7 @@ if template.valid_in_block: continue - self.log.info( - "Reject block with invalid tx: %s", - TxTemplate.__name__) + self.log.info("Reject block with invalid tx: %s", TxTemplate.__name__) blockname = f"for_invalid.{TxTemplate.__name__}" badblock = self.next_block(blockname) badtx = template.get_tx() @@ -157,10 +154,12 @@ badtx.rehash() badblock = self.update_block(blockname, [badtx]) self.send_blocks( - [badblock], success=False, - reject_reason=( - template.block_reject_reason or template.reject_reason), - reconnect=True, timeout=2) + [badblock], + success=False, + reject_reason=(template.block_reject_reason or template.reject_reason), + reconnect=True, + timeout=2, + ) self.move_tip(2) @@ -201,8 +200,7 @@ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) - self.log.info( - "Reject a chain with a double spend, even if it is longer") + self.log.info("Reject a chain with a double spend, even if it is longer") self.move_tip(5) b7 = self.next_block(7, spend=out[2]) self.send_blocks([b7], False) @@ -214,41 +212,50 @@ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) - self.log.info( - "Reject a block where the miner creates too much coinbase reward") + self.log.info("Reject a block where the miner creates too much coinbase reward") self.move_tip(6) b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1) - self.send_blocks([b9], success=False, - reject_reason='bad-cb-amount', reconnect=True) + self.send_blocks( + [b9], success=False, reject_reason="bad-cb-amount", reconnect=True + ) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) self.log.info( - "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer") + "Reject a chain where the miner creates too much coinbase reward, even if" + " the chain is longer" + ) self.move_tip(5) b10 = self.next_block(10, spend=out[3]) self.send_blocks([b10], False) b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1) - self.send_blocks([b11], success=False, - reject_reason='bad-cb-amount', reconnect=True) + self.send_blocks( + [b11], success=False, reject_reason="bad-cb-amount", reconnect=True + ) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # \-> b3 (1) -> b4 (2) self.log.info( - "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)") + "Reject a chain where the miner creates too much coinbase reward, even if" + " the chain is longer (on a forked chain)" + ) self.move_tip(5) b12 = self.next_block(12, spend=out[3]) self.save_spendable_output() b13 = self.next_block(13, spend=out[4]) self.save_spendable_output() b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1) - self.send_blocks([b12, b13, b14], success=False, - reject_reason='bad-cb-amount', reconnect=True) + self.send_blocks( + [b12, b13, b14], + success=False, + reject_reason="bad-cb-amount", + reconnect=True, + ) # New tip should be b13. 
assert_equal(node.getbestblockhash(), b13.hash) @@ -265,8 +272,12 @@ self.log.info("Reject a block with a spend from a re-org'ed out tx") self.move_tip(15) b17 = self.next_block(17, spend=txout_b3) - self.send_blocks([b17], success=False, - reject_reason='bad-txns-inputs-missingorspent', reconnect=True) + self.send_blocks( + [b17], + success=False, + reject_reason="bad-txns-inputs-missingorspent", + reconnect=True, + ) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) @@ -274,14 +285,19 @@ # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) self.log.info( - "Reject a block with a spend from a re-org'ed out tx (on a forked chain)") + "Reject a block with a spend from a re-org'ed out tx (on a forked chain)" + ) self.move_tip(13) b18 = self.next_block(18, spend=txout_b3) self.send_blocks([b18], False) b19 = self.next_block(19, spend=out[6]) - self.send_blocks([b19], success=False, - reject_reason='bad-txns-inputs-missingorspent', reconnect=True) + self.send_blocks( + [b19], + success=False, + reject_reason="bad-txns-inputs-missingorspent", + reconnect=True, + ) # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) @@ -293,8 +309,9 @@ self.send_blocks( [b20], success=False, - reject_reason='bad-txns-premature-spend-of-coinbase', - reconnect=True) + reject_reason="bad-txns-premature-spend-of-coinbase", + reconnect=True, + ) # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) @@ -302,7 +319,8 @@ # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) self.log.info( - "Reject a block spending an immature coinbase (on a forked chain)") + "Reject a block spending an immature coinbase (on a forked chain)" + ) self.move_tip(13) b21 = self.next_block(21, spend=out[6]) self.send_blocks([b21], False) @@ -311,8 +329,9 @@ self.send_blocks( [b22], success=False, - reject_reason='bad-txns-premature-spend-of-coinbase', - reconnect=True) + reject_reason="bad-txns-premature-spend-of-coinbase", + reconnect=True, + ) # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) @@ -324,7 +343,7 @@ b23 = self.next_block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) + script_output = CScript([b"\x00" * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = self.update_block(23, [tx]) @@ -338,17 +357,17 @@ # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) - self.log.info( - "Reject a block with coinbase input script size out of range") + self.log.info("Reject a block with coinbase input script size out of range") self.move_tip(15) b26 = self.next_block(26, spend=out[6]) - b26.vtx[0].vin[0].scriptSig = b'\x00' + b26.vtx[0].vin[0].scriptSig = b"\x00" b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. 
b26 = self.update_block(26, []) - self.send_blocks([b26], success=False, - reject_reason='bad-cb-length', reconnect=True) + self.send_blocks( + [b26], success=False, reject_reason="bad-cb-length", reconnect=True + ) # Extend the b26 chain to make sure bitcoind isn't accepting b26 b27 = self.next_block(27, spend=out[7]) @@ -357,11 +376,12 @@ # Now try a too-large-coinbase script self.move_tip(15) b28 = self.next_block(28, spend=out[6]) - b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 + b28.vtx[0].vin[0].scriptSig = b"\x00" * 101 b28.vtx[0].rehash() b28 = self.update_block(28, []) - self.send_blocks([b28], success=False, - reject_reason='bad-cb-length', reconnect=True) + self.send_blocks( + [b28], success=False, reject_reason="bad-cb-length", reconnect=True + ) # Extend the b28 chain to make sure bitcoind isn't accepting b28 b29 = self.next_block(29, spend=out[7]) @@ -370,7 +390,7 @@ # b30 has a max-sized coinbase scriptSig. self.move_tip(23) b30 = self.next_block(30) - b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 + b30.vtx[0].vin[0].scriptSig = b"\x00" * 100 b30.vtx[0].rehash() b30 = self.update_block(30, []) self.send_blocks([b30], True) @@ -395,21 +415,30 @@ # save 37's spendable output, but then double-spend out11 to invalidate # the block self.log.info( - "Reject a block spending transaction from a block which failed to connect") + "Reject a block spending transaction from a block which failed to connect" + ) self.move_tip(35) b37 = self.next_block(37, spend=out[11]) txout_b37 = b37.vtx[1] tx = self.create_and_sign_transaction(out[11], 0) b37 = self.update_block(37, [tx]) - self.send_blocks([b37], success=False, - reject_reason='bad-txns-inputs-missingorspent', reconnect=True) + self.send_blocks( + [b37], + success=False, + reject_reason="bad-txns-inputs-missingorspent", + reconnect=True, + ) # attempt to spend b37's first non-coinbase tx, at which point b37 was # still considered valid self.move_tip(35) b38 = self.next_block(38, spend=txout_b37) - self.send_blocks([b38], success=False, - reject_reason='bad-txns-inputs-missingorspent', reconnect=True) + self.send_blocks( + [b38], + success=False, + reject_reason="bad-txns-inputs-missingorspent", + reconnect=True, + ) self.move_tip(35) b39 = self.next_block(39) @@ -444,7 +473,7 @@ b44 = CBlock() b44.nTime = self.tip.nTime + 1 b44.hashPrevBlock = self.tip.sha256 - b44.nBits = 0x207fffff + b44.nBits = 0x207FFFFF b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() @@ -458,24 +487,24 @@ b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 - b45.nBits = 0x207fffff + b45.nBits = 0x207FFFFF b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() - self.block_heights[b45.sha256] = self.block_heights[ - self.tip.sha256] + 1 + self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 - self.send_blocks([b45], success=False, - reject_reason='bad-cb-missing', reconnect=True) + self.send_blocks( + [b45], success=False, reject_reason="bad-cb-missing", reconnect=True + ) self.log.info("Reject a block with no transactions") self.move_tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 - b46.nBits = 0x207fffff + b46.nBits = 0x207FFFFF b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() @@ -483,8 +512,9 @@ self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 - self.send_blocks([b46], success=False, - reject_reason='bad-cb-missing', reconnect=True) + self.send_blocks( + 
[b46], success=False, reject_reason="bad-cb-missing", reconnect=True + ) self.log.info("Reject a block with invalid work") self.move_tip(44) @@ -495,11 +525,8 @@ b47.nNonce += 1 b47.rehash() self.send_blocks( - [b47], - False, - force_send=True, - reject_reason='high-hash', - reconnect=True) + [b47], False, force_send=True, reject_reason="high-hash", reconnect=True + ) self.log.info("Reject a block with a timestamp >2 hours in the future") self.move_tip(44) @@ -507,16 +534,16 @@ b48.nTime = int(time.time()) + 60 * 60 * 3 # Header timestamp has changed. Re-solve the block. b48.solve() - self.send_blocks([b48], False, force_send=True, - reject_reason='time-too-new') + self.send_blocks([b48], False, force_send=True, reject_reason="time-too-new") self.log.info("Reject a block with invalid merkle hash") self.move_tip(44) b49 = self.next_block(49) b49.hashMerkleRoot += 1 b49.solve() - self.send_blocks([b49], success=False, - reject_reason='bad-txnmrklroot', reconnect=True) + self.send_blocks( + [b49], success=False, reject_reason="bad-txnmrklroot", reconnect=True + ) self.log.info("Reject a block with incorrect POW limit") self.move_tip(44) @@ -524,26 +551,25 @@ b50.nBits = b50.nBits - 1 b50.solve() self.send_blocks( - [b50], - False, - force_send=True, - reject_reason='bad-diffbits', - reconnect=True) + [b50], False, force_send=True, reject_reason="bad-diffbits", reconnect=True + ) self.log.info("Reject a block with two coinbase transactions") self.move_tip(44) b51 = self.next_block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = self.update_block(51, [cb2]) - self.send_blocks([b51], success=False, - reject_reason='bad-tx-coinbase', reconnect=True) + self.send_blocks( + [b51], success=False, reject_reason="bad-tx-coinbase", reconnect=True + ) self.log.info("Reject a block with duplicate transactions") self.move_tip(44) b52 = self.next_block(52, spend=out[15]) b52 = self.update_block(52, [b52.vtx[1]]) - self.send_blocks([b52], success=False, - reject_reason='tx-duplicate', reconnect=True) + self.send_blocks( + [b52], success=False, reject_reason="tx-duplicate", reconnect=True + ) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) @@ -559,11 +585,8 @@ b54.nTime = b35.nTime - 1 b54.solve() self.send_blocks( - [b54], - False, - force_send=True, - reject_reason='time-too-old', - reconnect=True) + [b54], False, force_send=True, reject_reason="time-too-old", reconnect=True + ) # valid timestamp self.move_tip(53) @@ -609,15 +632,18 @@ # b56 - copy b57, add a duplicate tx self.log.info( - "Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)") + "Reject a block with a duplicate transaction in the Merkle Tree (but with a" + " valid Merkle Root)" + ) self.move_tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = self.update_block(56, [b57.vtx[2]]) assert_equal(b56.hash, b57.hash) - self.send_blocks([b56], success=False, - reject_reason='bad-txns-duplicate', reconnect=True) + self.send_blocks( + [b56], success=False, reject_reason="bad-txns-duplicate", reconnect=True + ) # b57p2 - a good block with 6 tx'es, don't submit until end self.move_tip(55) @@ -631,15 +657,18 @@ # b56p2 - copy b57p2, duplicate two non-consecutive tx's self.log.info( - "Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)") + "Reject a block with two duplicate transactions in the Merkle Tree (but" + " with a valid Merkle Root)" + ) 
self.move_tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(len(b56p2.vtx), 6) b56p2 = self.update_block("b56p2", b56p2.vtx[4:6], reorder=False) assert_equal(b56p2.hash, b57p2.hash) - self.send_blocks([b56p2], success=False, - reject_reason='bad-txns-duplicate', reconnect=True) + self.send_blocks( + [b56p2], success=False, reject_reason="bad-txns-duplicate", reconnect=True + ) self.move_tip("57p2") self.send_blocks([b57p2], True) @@ -656,30 +685,34 @@ # # tx with prevout.n out of range - self.log.info( - "Reject a block with a transaction with prevout.n out of range") + self.log.info("Reject a block with a transaction with prevout.n out of range") self.move_tip(57) b58 = self.next_block(58, spend=out[17]) tx = CTransaction() assert len(out[17].vout) < 42 tx.vin.append( - CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff)) + CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xFFFFFFFF) + ) tx.vout.append(CTxOut(0, b"")) pad_tx(tx) tx.calc_sha256() b58 = self.update_block(58, [tx]) - self.send_blocks([b58], success=False, - reject_reason='bad-txns-inputs-missingorspent', reconnect=True) + self.send_blocks( + [b58], + success=False, + reject_reason="bad-txns-inputs-missingorspent", + reconnect=True, + ) # tx with output value > input value - self.log.info( - "Reject a block with a transaction with outputs > inputs") + self.log.info("Reject a block with a transaction with outputs > inputs") self.move_tip(57) b59 = self.next_block(59) tx = self.create_and_sign_transaction(out[17], 51 * COIN) b59 = self.update_block(59, [tx]) - self.send_blocks([b59], success=False, - reject_reason='bad-txns-in-belowout', reconnect=True) + self.send_blocks( + [b59], success=False, reject_reason="bad-txns-in-belowout", reconnect=True + ) # reset to good chain self.move_tip(57) @@ -697,15 +730,18 @@ # the second one should be rejected. See also CVE-2012-1909. 
# self.log.info( - "Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)") + "Reject a block with a transaction with a duplicate hash of a previous" + " transaction (BIP30)" + ) self.move_tip(60) b61 = self.next_block(61) b61.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG b61.vtx[0].rehash() b61 = self.update_block(61, []) assert_equal(duplicate_tx.serialize(), b61.vtx[0].serialize()) - self.send_blocks([b61], success=False, - reject_reason='bad-txns-BIP30', reconnect=True) + self.send_blocks( + [b61], success=False, reject_reason="bad-txns-BIP30", reconnect=True + ) # Test BIP30 (allow duplicate if spent) # @@ -713,54 +749,47 @@ # \-> b_spend_dup_cb (b_dup_cb) -> b_dup_2 () # self.move_tip(57) - b_spend_dup_cb = self.next_block('spend_dup_cb') + b_spend_dup_cb = self.next_block("spend_dup_cb") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(duplicate_tx.sha256, 0))) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) self.sign_tx(tx, duplicate_tx) tx.rehash() - b_spend_dup_cb = self.update_block('spend_dup_cb', [tx]) + b_spend_dup_cb = self.update_block("spend_dup_cb", [tx]) - b_dup_2 = self.next_block('dup_2') + b_dup_2 = self.next_block("dup_2") b_dup_2.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG b_dup_2.vtx[0].rehash() - b_dup_2 = self.update_block('dup_2', []) + b_dup_2 = self.update_block("dup_2", []) assert_equal(duplicate_tx.serialize(), b_dup_2.vtx[0].serialize()) assert_equal( - self.nodes[0].gettxout( - txid=duplicate_tx.hash, - n=0)['confirmations'], - 119) + self.nodes[0].gettxout(txid=duplicate_tx.hash, n=0)["confirmations"], 119 + ) self.send_blocks([b_spend_dup_cb, b_dup_2], success=True) # The duplicate has less confirmations assert_equal( - self.nodes[0].gettxout( - txid=duplicate_tx.hash, - n=0)['confirmations'], - 1) + self.nodes[0].gettxout(txid=duplicate_tx.hash, n=0)["confirmations"], 1 + ) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b_spend_dup_cb (b_dup_cb) -> b_dup_2 () # \-> b62 (18) # - self.log.info( - "Reject a block with a transaction with a nonfinal locktime") - self.move_tip('dup_2') + self.log.info("Reject a block with a transaction with a nonfinal locktime") + self.move_tip("dup_2") b62 = self.next_block(62) tx = CTransaction() - tx.nLockTime = 0xffffffff # this locktime is non-final + tx.nLockTime = 0xFFFFFFFF # this locktime is non-final # don't set nSequence tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) - assert tx.vin[0].nSequence < 0xffffffff + assert tx.vin[0].nSequence < 0xFFFFFFFF tx.calc_sha256() b62 = self.update_block(62, [tx]) self.send_blocks( - [b62], - success=False, - reject_reason='bad-txns-nonfinal', - reconnect=True) + [b62], success=False, reject_reason="bad-txns-nonfinal", reconnect=True + ) # Test a non-final coinbase is also rejected # @@ -768,18 +797,17 @@ # \-> b63 (-) # self.log.info( - "Reject a block with a coinbase transaction with a nonfinal locktime") - self.move_tip('dup_2') + "Reject a block with a coinbase transaction with a nonfinal locktime" + ) + self.move_tip("dup_2") b63 = self.next_block(63) - b63.vtx[0].nLockTime = 0xffffffff + b63.vtx[0].nLockTime = 0xFFFFFFFF b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = self.update_block(63, []) self.send_blocks( - [b63], - success=False, - reject_reason='bad-txns-nonfinal', - reconnect=True) + [b63], success=False, reject_reason="bad-txns-nonfinal", reconnect=True + ) 
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, @@ -796,8 +824,10 @@ # b64 is a good block (same as b64 but w/ canonical varint) # self.log.info( - "Accept a valid block even if a bloated version of the block has previously been sent") - self.move_tip('dup_2') + "Accept a valid block even if a bloated version of the block has previously" + " been sent" + ) + self.move_tip("dup_2") regular_block = self.next_block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization @@ -808,15 +838,15 @@ tx = CTransaction() # use canonical serialization to calculate size - script_length = LEGACY_MAX_BLOCK_SIZE - \ - len(b64a.normal_serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) + script_length = LEGACY_MAX_BLOCK_SIZE - len(b64a.normal_serialize()) - 69 + script_output = CScript([b"\x00" * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = self.update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) - self.send_blocks([b64a], success=False, - reject_reason='non-canonical ReadCompactSize()') + self.send_blocks( + [b64a], success=False, reject_reason="non-canonical ReadCompactSize()" + ) # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently # resend the header message, it won't send us the getdata message again. Just @@ -825,7 +855,7 @@ node.disconnect_p2ps() self.reconnect_p2p() - self.move_tip('dup_2') + self.move_tip("dup_2") b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) @@ -840,7 +870,9 @@ # -> b_dup_2 () -> b64 (18) -> b65 (19) # self.log.info( - "Accept a block with a transaction spending an output created in the same block") + "Accept a block with a transaction spending an output created in the same" + " block" + ) self.move_tip(64) b65 = self.next_block(65) tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue) @@ -856,15 +888,21 @@ # # self.log.info( - "Reject a block with a transaction double spending a transaction created in the same block") + "Reject a block with a transaction double spending a transaction created in" + " the same block" + ) self.move_tip(65) b67 = self.next_block(67) tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue) tx2 = self.create_and_sign_transaction(tx1, 1) tx3 = self.create_and_sign_transaction(tx1, 2) b67 = self.update_block(67, [tx1, tx2, tx3]) - self.send_blocks([b67], success=False, - reject_reason='bad-txns-inputs-missingorspent', reconnect=True) + self.send_blocks( + [b67], + success=False, + reject_reason="bad-txns-inputs-missingorspent", + reconnect=True, + ) # More tests of block subsidy # @@ -879,21 +917,23 @@ # this succeeds # self.log.info( - "Reject a block trying to claim too much subsidy in the coinbase transaction") + "Reject a block trying to claim too much subsidy in the coinbase" + " transaction" + ) self.move_tip(65) b68 = self.next_block(68, additional_coinbase_value=10) - tx = self.create_and_sign_transaction( - out[20], out[20].vout[0].nValue - 9) + tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9) b68 = self.update_block(68, [tx]) - self.send_blocks([b68], success=False, - reject_reason='bad-cb-amount', reconnect=True) + self.send_blocks( + [b68], success=False, reject_reason="bad-cb-amount", 
reconnect=True
+        )
         self.log.info(
-            "Accept a block claiming the correct subsidy in the coinbase transaction")
+            "Accept a block claiming the correct subsidy in the coinbase transaction"
+        )
         self.move_tip(65)
         b69 = self.next_block(69, additional_coinbase_value=10)
-        tx = self.create_and_sign_transaction(
-            out[20], out[20].vout[0].nValue - 10)
+        tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 10)
         self.update_block(69, [tx])
         self.send_blocks([b69], True)
         self.save_spendable_output()
@@ -904,19 +944,25 @@
         #                                          \-> b70 (21)
         #
         self.log.info(
-            "Reject a block containing a transaction spending from a non-existent input")
+            "Reject a block containing a transaction spending from a non-existent input"
+        )
         self.move_tip(69)
         b70 = self.next_block(70, spend=out[21])
         bogus_tx = CTransaction()
         bogus_tx.sha256 = uint256_from_str(
-            b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
+            b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c"
+        )
         tx = CTransaction()
-        tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
+        tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xFFFFFFFF))
         tx.vout.append(CTxOut(1, b""))
         pad_tx(tx)
         b70 = self.update_block(70, [tx])
-        self.send_blocks([b70], success=False,
-                         reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
+        self.send_blocks(
+            [b70],
+            success=False,
+            reject_reason="bad-txns-inputs-missingorspent",
+            reconnect=True,
+        )

     # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
     #
@@ -927,7 +973,9 @@
         # b71 is a copy of 72, but re-adds one of its transactions.  However,
         # it has the same hash as b72.
         self.log.info(
-            "Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
+            "Reject a block containing a duplicate transaction but with the same Merkle"
+            " root (Merkle tree malleability)"
+        )
         self.move_tip(69)
         b72 = self.next_block(72)
         tx1 = self.create_and_sign_transaction(out[21], 2)
@@ -945,8 +993,9 @@
         assert_equal(b72.sha256, b71.sha256)

         self.move_tip(71)
-        self.send_blocks([b71], success=False,
-                         reject_reason='bad-txns-duplicate', reconnect=True)
+        self.send_blocks(
+            [b71], success=False, reject_reason="bad-txns-duplicate", reconnect=True
+        )
         self.move_tip(72)
         self.send_blocks([b72], True)

@@ -1022,13 +1071,11 @@
         #
         #  -> b81 (26) -> b82 (27) -> b83 (28)
         #
-        self.log.info(
-            "Accept a block with invalid opcodes in dead execution paths")
+        self.log.info("Accept a block with invalid opcodes in dead execution paths")
         b83 = self.next_block(83)
         op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
         script = CScript(op_codes)
-        tx1 = self.create_and_sign_transaction(
-            out[28], out[28].vout[0].nValue, script)
+        tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)

         tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE]))
         tx2.vin[0].scriptSig = CScript([OP_FALSE])
@@ -1086,11 +1133,14 @@
         b89a = self.next_block("89a", spend=out[32])
         tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
         b89a = self.update_block("89a", [tx])
-        self.send_blocks([b89a], success=False,
-                         reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
+        self.send_blocks(
+            [b89a],
+            success=False,
+            reject_reason="bad-txns-inputs-missingorspent",
+            reconnect=True,
+        )

-        self.log.info(
-            "Test a re-org of one week's worth of blocks (1088 blocks)")
+        self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")

         self.move_tip(88)
         LARGE_REORG_SIZE = 1088
@@ -1100,7 +1150,7 @@
             b = self.next_block(i, spend)
             tx = CTransaction()
             script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69
-            script_output = CScript([b'\x00' * script_length])
+            script_output = CScript([b"\x00" * script_length])
             tx.vout.append(CTxOut(0, script_output))
             tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
             b = self.update_block(i, [tx])
@@ -1131,25 +1181,24 @@
         self.send_blocks([block], True, timeout=2440)

         self.log.info("Reject a block with an invalid block header version")
-        b_v1 = self.next_block('b_v1', version=1)
+        b_v1 = self.next_block("b_v1", version=1)
         self.send_blocks(
             [b_v1],
             success=False,
             force_send=True,
-            reject_reason='bad-version(0x00000001)',
-            reconnect=True)
+            reject_reason="bad-version(0x00000001)",
+            reconnect=True,
+        )

         self.move_tip(chain1_tip + 2)
-        b_cb34 = self.next_block('b_cb34')
+        b_cb34 = self.next_block("b_cb34")
         b_cb34.vtx[0].vin[0].scriptSig = b_cb34.vtx[0].vin[0].scriptSig[:-1]
         b_cb34.vtx[0].rehash()
         b_cb34.hashMerkleRoot = b_cb34.calc_merkle_root()
         b_cb34.solve()
         self.send_blocks(
-            [b_cb34],
-            success=False,
-            reject_reason='bad-cb-height',
-            reconnect=True)
+            [b_cb34], success=False, reject_reason="bad-cb-height", reconnect=True
+        )

     # Helper methods
     ################
@@ -1160,31 +1209,45 @@

     # this is a little handier to use than the version in blocktools.py
     def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
-        return create_tx_with_script(
-            spend_tx, n, amount=value, script_pub_key=script)
+        return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)

     # sign a transaction, using the key we know about
     # this signs input 0 in tx, which is assumed to be spending output n in
     # spend_tx
     def sign_tx(self, tx, spend_tx):
         scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
-        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
+        if scriptPubKey[0] == OP_TRUE:  # an anyone-can-spend
             tx.vin[0].scriptSig = CScript()
             return
         sighash = SignatureHashForkId(
-            spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[0].nValue)
+            spend_tx.vout[0].scriptPubKey,
+            tx,
+            0,
+            SIGHASH_ALL | SIGHASH_FORKID,
+            spend_tx.vout[0].nValue,
+        )
         tx.vin[0].scriptSig = CScript(
-            [self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
+            [
+                self.coinbase_key.sign_ecdsa(sighash)
+                + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
+            ]
+        )

-    def create_and_sign_transaction(
-            self, spend_tx, value, script=CScript([OP_TRUE])):
+    def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
         tx = self.create_tx(spend_tx, 0, value, script)
         self.sign_tx(tx, spend_tx)
         tx.rehash()
         return tx

-    def next_block(self, number, spend=None, additional_coinbase_value=0,
-                   script=CScript([OP_TRUE]), *, version=4):
+    def next_block(
+        self,
+        number,
+        spend=None,
+        additional_coinbase_value=0,
+        script=CScript([OP_TRUE]),
+        *,
+        version=4,
+    ):
         if self.tip is None:
             base_block_hash = self.genesis_hash
             block_time = int(time.time()) + 1
@@ -1197,20 +1260,12 @@
             coinbase.vout[0].nValue += additional_coinbase_value
         coinbase.rehash()
         if spend is None:
-            block = create_block(
-                base_block_hash,
-                coinbase,
-                block_time,
-                version=version)
+            block = create_block(base_block_hash, coinbase, block_time, version=version)
         else:
             # all but one satoshi to fees
             coinbase.vout[0].nValue += spend.vout[0].nValue - 1
             coinbase.rehash()
-            block = create_block(
-                base_block_hash,
-                coinbase,
-                block_time,
-                version=version)
+            block = create_block(base_block_hash, coinbase, block_time, version=version)
             # spend 1 satoshi
             tx = self.create_tx(spend, 0, 1, script)
             self.sign_tx(tx, spend)
@@ -1276,17 +1331,32 @@
             self.nodes[0].disconnect_p2ps()
         self.bootstrap_p2p(timeout=timeout)

-    def send_blocks(self, blocks, success=True, reject_reason=None,
-                    force_send=False, reconnect=False, timeout=60):
+    def send_blocks(
+        self,
+        blocks,
+        success=True,
+        reject_reason=None,
+        force_send=False,
+        reconnect=False,
+        timeout=60,
+    ):
         """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

-        Call with success = False if the tip shouldn't advance to the most recent block."""
-        self.helper_peer.send_blocks_and_test(blocks, self.nodes[0], success=success,
-                                              reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
+        Call with success = False if the tip shouldn't advance to the most recent block.
+        """
+        self.helper_peer.send_blocks_and_test(
+            blocks,
+            self.nodes[0],
+            success=success,
+            reject_reason=reject_reason,
+            force_send=force_send,
+            timeout=timeout,
+            expect_disconnect=reconnect,
+        )
        if reconnect:
             self.reconnect_p2p(timeout=timeout)


-if __name__ == '__main__':
+if __name__ == "__main__":
     FullBlockTest().main()
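# Side note, not part of the diff above: the b71/b72 pair relies on merkle tree
# malleability (CVE-2012-2459) -- duplicating the final transaction of an
# odd-length list leaves the merkle root unchanged, because the odd leaf is
# paired with itself anyway. A self-contained sketch with stand-in "txids":
import hashlib

def dsha256(b):
    return hashlib.sha256(hashlib.sha256(b).digest()).digest()

def merkle_root(hashes):
    hashes = list(hashes)
    while len(hashes) > 1:
        if len(hashes) % 2:
            hashes.append(hashes[-1])  # odd count: pair the last hash with itself
        hashes = [dsha256(hashes[i] + hashes[i + 1]) for i in range(0, len(hashes), 2)]
    return hashes[0]

txids = [dsha256(bytes([i])) for i in range(3)]
assert merkle_root(txids) == merkle_root(txids + [txids[-1]])  # same root, same hash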
diff --git a/test/functional/feature_blockfilterindex_prune.py b/test/functional/feature_blockfilterindex_prune.py
--- a/test/functional/feature_blockfilterindex_prune.py
+++ b/test/functional/feature_blockfilterindex_prune.py
@@ -18,22 +18,21 @@

     def sync_index(self, height):
         expected = {
-            'basic block filter index': {
-                'synced': True,
-                'best_block_height': height
-            }
+            "basic block filter index": {"synced": True, "best_block_height": height}
         }
         self.wait_until(lambda: self.nodes[0].getindexinfo() == expected)

     def run_test(self):
         node = self.nodes[0]
-        self.log.info("check if we can access a blockfilter when pruning is "
-                      "enabled but no blocks are actually pruned")
+        self.log.info(
+            "check if we can access a blockfilter when pruning is "
+            "enabled but no blocks are actually pruned"
+        )
         self.sync_index(200)
         assert_greater_than(
-            len(node.getblockfilter(node.getbestblockhash())['filter']),
-            0)
+            len(node.getblockfilter(node.getbestblockhash())["filter"]), 0
+        )
         self.generate(node, 500)
         self.sync_index(height=700)

@@ -47,17 +46,16 @@
         # transactions for core.
         assert_equal(pruneheight, 346)

-        self.log.info("check if we can access the tips blockfilter when we have"
-                      " pruned some blocks")
+        self.log.info(
+            "check if we can access the tips blockfilter when we have"
+            " pruned some blocks"
+        )
         assert_greater_than(
-            len(node.getblockfilter(node.getbestblockhash())['filter']),
-            0)
+            len(node.getblockfilter(node.getbestblockhash())["filter"]), 0
+        )

-        self.log.info("check if we can access the blockfilter of a pruned "
-                      "block")
-        assert_greater_than(
-            len(node.getblockfilter(node.getblockhash(2))['filter']),
-            0)
+        self.log.info("check if we can access the blockfilter of a pruned block")
+        assert_greater_than(len(node.getblockfilter(node.getblockhash(2))["filter"]), 0)

         # mine and sync index up to a height that will later be the pruneheight
         self.generate(node, 338)
@@ -69,44 +67,57 @@

         self.log.info("make sure accessing the blockfilters throws an error")
         assert_raises_rpc_error(
-            -1, "Index is not enabled for filtertype basic",
-            node.getblockfilter, node.getblockhash(2))
+            -1,
+            "Index is not enabled for filtertype basic",
+            node.getblockfilter,
+            node.getblockhash(2),
+        )
         self.generate(node, 462)

-        self.log.info("prune exactly up to the blockfilterindexes best block "
-                      "while blockfilters are disabled")
+        self.log.info(
+            "prune exactly up to the blockfilterindexes best block "
+            "while blockfilters are disabled"
+        )
         pruneheight_2 = self.nodes[0].pruneblockchain(1040)
         assert_equal(pruneheight_2, 1038)
         self.restart_node(
-            0, extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"])
-        self.log.info("make sure that we can continue with the partially synced"
-                      " index after having pruned up to the index height")
+            0, extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"]
+        )
+        self.log.info(
+            "make sure that we can continue with the partially synced"
+            " index after having pruned up to the index height"
+        )
         self.sync_index(height=1500)

-        self.log.info("prune below the blockfilterindexes best block while "
-                      "blockfilters are disabled")
-        self.restart_node(
-            0,
-            extra_args=["-fastprune", "-prune=1"])
+        self.log.info(
+            "prune below the blockfilterindexes best block while "
+            "blockfilters are disabled"
+        )
+        self.restart_node(0, extra_args=["-fastprune", "-prune=1"])
         self.generate(node, 1000)
         pruneheight_3 = self.nodes[0].pruneblockchain(2000)
         assert_greater_than(pruneheight_3, pruneheight_2)
         self.stop_node(0)

-        self.log.info("make sure we get an init error when starting the node "
-                      "again with block filters")
+        self.log.info(
+            "make sure we get an init error when starting the node "
+            "again with block filters"
+        )
         with node.assert_debug_log(
-                ["basic block filter index best block of the index goes beyond "
-                 "pruned data. Please disable the index or reindex (which will "
-                 "download the whole blockchain again)"]):
+            [
+                "basic block filter index best block of the index goes beyond "
+                "pruned data. Please disable the index or reindex (which will "
+                "download the whole blockchain again)"
+            ]
+        ):
             node.assert_start_raises_init_error(
-                extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"])
+                extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"]
+            )

         self.log.info("make sure the node starts again with the -reindex arg")
         self.start_node(
-            0,
-            extra_args=["-fastprune", "-prune=1", "-blockfilterindex",
-                        "-reindex"])
+            0, extra_args=["-fastprune", "-prune=1", "-blockfilterindex", "-reindex"]
+        )


-if __name__ == '__main__':
+if __name__ == "__main__":
     FeatureBlockfilterindexPruneTest().main()
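# Side note, not part of the diff above: sync_index() leans on the framework's
# wait_until() helper. A minimal, self-contained sketch of that polling
# pattern, with predicate() standing in for the getindexinfo() comparison:
import time

def wait_until(predicate, timeout=60, interval=0.5):
    """Poll predicate() until it returns True or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError("predicate not satisfied within timeout")

# Usage example: returns immediately because the predicate is already true.
wait_until(lambda: 1 + 1 == 2, timeout=5)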
diff --git a/test/functional/feature_blocksdir.py b/test/functional/feature_blocksdir.py
--- a/test/functional/feature_blocksdir.py
+++ b/test/functional/feature_blocksdir.py
@@ -18,27 +18,30 @@

     def run_test(self):
         self.stop_node(0)
-        assert os.path.isdir(os.path.join(
-            self.nodes[0].datadir, self.chain, "blocks"))
+        assert os.path.isdir(os.path.join(self.nodes[0].datadir, self.chain, "blocks"))
         assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "blocks"))
         shutil.rmtree(self.nodes[0].datadir)
         initialize_datadir(self.options.tmpdir, 0, self.chain)
         self.log.info("Starting with nonexistent blocksdir ...")
-        blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
+        blocksdir_path = os.path.join(self.options.tmpdir, "blocksdir")
         self.nodes[0].assert_start_raises_init_error(
             [f"-blocksdir={blocksdir_path}"],
-            f'Error: Specified blocks directory "{blocksdir_path}" does not exist.')
+            f'Error: Specified blocks directory "{blocksdir_path}" does not exist.',
+        )
         os.mkdir(blocksdir_path)
         self.log.info("Starting with existing blocksdir ...")
         self.start_node(0, [f"-blocksdir={blocksdir_path}"])
         self.log.info("mining blocks..")
-        self.generatetoaddress(self.nodes[0],
-                               10, self.nodes[0].get_deterministic_priv_key().address)
-        assert os.path.isfile(os.path.join(
-            blocksdir_path, self.chain, "blocks", "blk00000.dat"))
-        assert os.path.isdir(os.path.join(
-            self.nodes[0].datadir, self.chain, "blocks", "index"))
+        self.generatetoaddress(
+            self.nodes[0], 10, self.nodes[0].get_deterministic_priv_key().address
+        )
+        assert os.path.isfile(
+            os.path.join(blocksdir_path, self.chain, "blocks", "blk00000.dat")
+        )
+        assert os.path.isdir(
+            os.path.join(self.nodes[0].datadir, self.chain, "blocks", "index")
+        )


-if __name__ == '__main__':
+if __name__ == "__main__":
     BlocksdirTest().main()
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -33,7 +33,7 @@


 def cltv_lock_to_height(wallet, from_node, fundtx, height=-1):
-    '''Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make
+    """Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make
     a transaction that spends it.

     This transforms the output script to anyone can spend (OP_TRUE) if the
@@ -43,7 +43,7 @@

     TODO: test more ways that transactions using CLTV could be invalid (eg
     locktime requirements fail, sequence time requirements fail, etc).
-    '''
+    """
     assert_equal(len(fundtx.vin), 1)
     height_op = OP_1NEGATE
     if height > 0:
@@ -52,17 +52,16 @@
         height_op = CScriptNum(height)

     fundtx.vout[0].scriptPubKey = CScript(
-        [height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])
+        [height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE]
+    )

     pad_tx(fundtx)

     spendtx = create_tx_with_script(
         fundtx,
         0,
-        amount=(
-            fundtx.vout[0].nValue -
-            10000),
-        script_pub_key=CScript(
-            [OP_TRUE]))
+        amount=(fundtx.vout[0].nValue - 10000),
+        script_pub_key=CScript([OP_TRUE]),
+    )

     return fundtx, spendtx

@@ -70,11 +69,13 @@
 class BIP65Test(BitcoinTestFramework):
     def set_test_params(self):
         self.num_nodes = 1
-        self.extra_args = [[
-            '-whitelist=noban@127.0.0.1',
-            '-par=1',  # Use only one script thread to get the exact reject reason for testing
-            '-acceptnonstdtxn=1',  # cltv_invalidate is nonstandard
-        ]]
+        self.extra_args = [
+            [
+                "-whitelist=noban@127.0.0.1",
+                "-par=1",  # Use only one script thread to get the exact reject reason for testing
+                "-acceptnonstdtxn=1",  # cltv_invalidate is nonstandard
+            ]
+        ]
         self.setup_clean_chain = True
         self.rpc_timeout = 120
@@ -87,15 +88,16 @@
         self.generate(self.nodes[0], CLTV_HEIGHT - 2 - 10)

         self.log.info(
-            "Test that an invalid-according-to-CLTV transaction can still appear in a block")
+            "Test that an invalid-according-to-CLTV transaction can still appear in a"
+            " block"
+        )

-        fundtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
+        fundtx = wallet.create_self_transfer(from_node=self.nodes[0])["tx"]
         fundtx, spendtx = cltv_lock_to_height(wallet, self.nodes[0], fundtx)

         tip = self.nodes[0].getbestblockhash()
-        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
-        block = create_block(int(tip, 16), create_coinbase(
-            CLTV_HEIGHT - 1), block_time)
+        block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
+        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
         block.nVersion = 3
         block.vtx.append(fundtx)
         # include the -1 CLTV in block
@@ -115,16 +117,19 @@
         block.nVersion = 3
         block.solve()

-        with self.nodes[0].assert_debug_log(expected_msgs=[f'{block.hash}, bad-version(0x00000003)']):
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[f"{block.hash}, bad-version(0x00000003)"]
+        ):
             peer.send_and_ping(msg_block(block))
             assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
             peer.sync_with_ping()

         self.log.info(
-            "Test that invalid-according-to-cltv transactions cannot appear in a block")
+            "Test that invalid-according-to-cltv transactions cannot appear in a block"
+        )
         block.nVersion = 4

-        fundtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
+        fundtx = wallet.create_self_transfer(from_node=self.nodes[0])["tx"]
         fundtx, spendtx = cltv_lock_to_height(wallet, self.nodes[0], fundtx)

         # The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is
@@ -144,43 +149,53 @@
         # We show that this tx is invalid due to CLTV by getting it
         # rejected from the mempool for exactly that reason.
         assert_equal(
-            [{'txid': spendtx.hash, 'allowed': False,
-              'reject-reason': 'non-mandatory-script-verify-flag (Negative locktime)'}],
+            [
+                {
+                    "txid": spendtx.hash,
+                    "allowed": False,
+                    "reject-reason": (
+                        "non-mandatory-script-verify-flag (Negative locktime)"
+                    ),
+                }
+            ],
             self.nodes[0].testmempoolaccept(
-                rawtxs=[spendtx.serialize().hex()], maxfeerate=0)
+                rawtxs=[spendtx.serialize().hex()], maxfeerate=0
+            ),
         )

         tip = block.hash
         block_time += 1
-        block = create_block(
-            block.sha256, create_coinbase(CLTV_HEIGHT + 1), block_time)
+        block = create_block(block.sha256, create_coinbase(CLTV_HEIGHT + 1), block_time)
         block.nVersion = 4
         block.vtx.append(spendtx)
         block.hashMerkleRoot = block.calc_merkle_root()
         block.solve()

         with self.nodes[0].assert_debug_log(
-                expected_msgs=[f'ConnectBlock {block.hash} failed, blk-bad-inputs']):
+            expected_msgs=[f"ConnectBlock {block.hash} failed, blk-bad-inputs"]
+        ):
             peer.send_and_ping(msg_block(block))
             assert_equal(self.nodes[0].getbestblockhash(), tip)
             peer.sync_with_ping()

         self.log.info(
-            "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
+            "Test that a version 4 block with a valid-according-to-CLTV transaction is"
+            " accepted"
+        )

-        fundtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
+        fundtx = wallet.create_self_transfer(from_node=self.nodes[0])["tx"]
         fundtx, spendtx = cltv_lock_to_height(
-            wallet, self.nodes[0], fundtx, height=CLTV_HEIGHT)
+            wallet, self.nodes[0], fundtx, height=CLTV_HEIGHT
+        )

         # make sure sequence is nonfinal and locktime is good
-        spendtx.vin[0].nSequence = 0xfffffffe
+        spendtx.vin[0].nSequence = 0xFFFFFFFE
         spendtx.nLockTime = CLTV_HEIGHT

         # both transactions are fully valid
         self.nodes[0].testmempoolaccept(
-            rawtxs=[
-                fundtx.serialize().hex(),
-                spendtx.serialize().hex()])
+            rawtxs=[fundtx.serialize().hex(), spendtx.serialize().hex()]
+        )

         # Modify the transactions in the block to be valid against CLTV
         block.vtx.pop(1)
@@ -195,5 +210,5 @@
         assert_equal(self.nodes[0].getbestblockhash(), block.hash)


-if __name__ == '__main__':
+if __name__ == "__main__":
     BIP65Test().main()
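# Side note, not part of the diff above: a compact restatement of the BIP65
# rule that cltv_lock_to_height() exercises. This mirrors the consensus checks
# (negative operand, mismatched locktime type, insufficient nLockTime, final
# nSequence) in plain Python; it is illustrative, not the node's implementation.
LOCKTIME_THRESHOLD = 500_000_000  # below: block height; at or above: unix time
SEQUENCE_FINAL = 0xFFFFFFFF

def cltv_allows_spend(stack_value, tx_locktime, input_sequence):
    if stack_value < 0:
        return False  # the "Negative locktime" reject reason seen in the test
    same_type = (stack_value < LOCKTIME_THRESHOLD) == (tx_locktime < LOCKTIME_THRESHOLD)
    if not same_type:
        return False  # height-based operand cannot satisfy a time-based locktime
    if stack_value > tx_locktime:
        return False  # the transaction is not yet old enough
    return input_sequence != SEQUENCE_FINAL  # a final input disables nLockTime

# The valid case from the test: matching height, nonfinal sequence 0xFFFFFFFE.
assert cltv_allows_spend(100, 100, 0xFFFFFFFE)
assert not cltv_allows_spend(-1, 100, 0xFFFFFFFE)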
diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py
--- a/test/functional/feature_coinstatsindex.py
+++ b/test/functional/feature_coinstatsindex.py
@@ -30,7 +30,7 @@
             ],
             [
                 "-coinstatsindex",
-            ]
+            ],
         ]

     def run_test(self):
@@ -43,16 +43,17 @@
     def block_sanity_check(self, block_info):
         block_subsidy = 50_000_000
         assert_equal(
-            block_info['prevout_spent'] + block_subsidy,
-            block_info['new_outputs_ex_coinbase'] + block_info['coinbase']
-            + block_info['unspendable']
+            block_info["prevout_spent"] + block_subsidy,
+            block_info["new_outputs_ex_coinbase"]
+            + block_info["coinbase"]
+            + block_info["unspendable"],
         )

     def _test_coin_stats_index(self):
         node = self.nodes[0]
         index_node = self.nodes[1]
         # Both none and muhash options allow the usage of the index
-        index_hash_options = ['none', 'muhash']
+        index_hash_options = ["none", "muhash"]

         # Generate a normal transaction and mine it
         self.generate(self.wallet, 101)
@@ -60,92 +61,109 @@
         self.generate(node, 1)

         self.log.info(
-            "Test that gettxoutsetinfo() output is consistent with or without coinstatsindex option")
-        self.wait_until(lambda: not try_rpc(-32603,
-                        "Unable to read UTXO set", node.gettxoutsetinfo))
-        res0 = node.gettxoutsetinfo('none')
+            "Test that gettxoutsetinfo() output is consistent with or without"
+            " coinstatsindex option"
+        )
+        self.wait_until(
+            lambda: not try_rpc(-32603, "Unable to read UTXO set", node.gettxoutsetinfo)
+        )
+        res0 = node.gettxoutsetinfo("none")

         # The fields 'disk_size' and 'transactions' do not exist on the index
-        del res0['disk_size'], res0['transactions']
+        del res0["disk_size"], res0["transactions"]

-        self.wait_until(lambda: not try_rpc(-32603,
-                        "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo,
-                        'muhash'))
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
         for hash_option in index_hash_options:
             res1 = index_node.gettxoutsetinfo(hash_option)
             # The fields 'block_info' and 'total_unspendable_amount' only exist
             # on the index
-            del res1['block_info'], res1['total_unspendable_amount']
-            res1.pop('muhash', None)
+            del res1["block_info"], res1["total_unspendable_amount"]
+            res1.pop("muhash", None)

             # Everything left should be the same
             assert_equal(res1, res0)

         self.log.info(
             "Test that gettxoutsetinfo() can fetch data on specific "
-            "heights with index")
+            "heights with index"
+        )

         # Generate a new tip
         self.generate(node, 5)

-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo,
-                        'muhash'))
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
         for hash_option in index_hash_options:
             # Fetch old stats by height
             res2 = index_node.gettxoutsetinfo(hash_option, 102)
-            del res2['block_info'], res2['total_unspendable_amount']
-            res2.pop('muhash', None)
+            del res2["block_info"], res2["total_unspendable_amount"]
+            res2.pop("muhash", None)
             assert_equal(res0, res2)

             # Fetch old stats by hash
-            res3 = index_node.gettxoutsetinfo(hash_option, res0['bestblock'])
-            del res3['block_info'], res3['total_unspendable_amount']
-            res3.pop('muhash', None)
+            res3 = index_node.gettxoutsetinfo(hash_option, res0["bestblock"])
+            del res3["block_info"], res3["total_unspendable_amount"]
+            res3.pop("muhash", None)
             assert_equal(res0, res3)

             # It does not work without coinstatsindex
             assert_raises_rpc_error(
-                -8, "Querying specific block heights requires coinstatsindex",
-                node.gettxoutsetinfo, hash_option, 102)
+                -8,
+                "Querying specific block heights requires coinstatsindex",
+                node.gettxoutsetinfo,
+                hash_option,
+                102,
+            )

         self.log.info("Test gettxoutsetinfo() with index and verbose flag")

         for hash_option in index_hash_options:
             # Genesis block is unspendable
             res4 = index_node.gettxoutsetinfo(hash_option, 0)
-            assert_equal(res4['total_unspendable_amount'], 50_000_000)
-            assert_equal(res4['block_info'], {
-                'unspendable': 50_000_000,
-                'prevout_spent': 0,
-                'new_outputs_ex_coinbase': 0,
-                'coinbase': 0,
-                'unspendables': {
-                    'genesis_block': 50_000_000,
-                    'bip30': 0,
-                    'scripts': 0,
-                    'unclaimed_rewards': 0
-                }
-            })
-            self.block_sanity_check(res4['block_info'])
+            assert_equal(res4["total_unspendable_amount"], 50_000_000)
+            assert_equal(
+                res4["block_info"],
+                {
+                    "unspendable": 50_000_000,
+                    "prevout_spent": 0,
+                    "new_outputs_ex_coinbase": 0,
+                    "coinbase": 0,
+                    "unspendables": {
+                        "genesis_block": 50_000_000,
+                        "bip30": 0,
+                        "scripts": 0,
+                        "unclaimed_rewards": 0,
+                    },
+                },
+            )
+            self.block_sanity_check(res4["block_info"])

             # Test an older block height that included a normal tx
             res5 = index_node.gettxoutsetinfo(hash_option, 102)
-            assert_equal(res5['total_unspendable_amount'], 50_000_000)
-            assert_equal(res5['block_info'], {
-                'unspendable': 0,
-                'prevout_spent': 50_000_000,
-                'new_outputs_ex_coinbase': Decimal('49999700.00'),
-                'coinbase': Decimal('50000300.00'),
-                'unspendables': {
-                    'genesis_block': 0,
-                    'bip30': 0,
-                    'scripts': 0,
-                    'unclaimed_rewards': 0,
-                }
-            })
-            self.block_sanity_check(res5['block_info'])
+            assert_equal(res5["total_unspendable_amount"], 50_000_000)
+            assert_equal(
+                res5["block_info"],
+                {
+                    "unspendable": 0,
+                    "prevout_spent": 50_000_000,
+                    "new_outputs_ex_coinbase": Decimal("49999700.00"),
+                    "coinbase": Decimal("50000300.00"),
+                    "unspendables": {
+                        "genesis_block": 0,
+                        "bip30": 0,
+                        "scripts": 0,
+                        "unclaimed_rewards": 0,
+                    },
+                },
+            )
+            self.block_sanity_check(res5["block_info"])

         # Generate and send a normal tx with two outputs
         tx1_txid, tx1_vout = self.wallet.send_to(
@@ -160,36 +178,42 @@

         # Generate and send another tx with an OP_RETURN output (which is
         # unspendable)
         tx2 = self.wallet.create_self_transfer(
-            from_node=self.nodes[0], utxo_to_spend=tx1_out_21)['tx']
-        tx2.vout = [CTxOut(int(20_990_000 * XEC),
-                           CScript([OP_RETURN] + [OP_FALSE] * 50))]
+            from_node=self.nodes[0], utxo_to_spend=tx1_out_21
+        )["tx"]
+        tx2.vout = [
+            CTxOut(int(20_990_000 * XEC), CScript([OP_RETURN] + [OP_FALSE] * 50))
+        ]
         tx2_hex = tx2.serialize().hex()
         self.nodes[0].sendrawtransaction(tx2_hex)

         # Include both txs in a block
         self.generate(self.nodes[0], 1)

-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo, 'muhash'))
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
         for hash_option in index_hash_options:
             # Check all amounts were registered correctly
             res6 = index_node.gettxoutsetinfo(hash_option, 108)
+            assert_equal(res6["total_unspendable_amount"], Decimal("70990000.00"))
             assert_equal(
-                res6['total_unspendable_amount'],
-                Decimal('70990000.00'))
-            assert_equal(res6['block_info'], {
-                'unspendable': Decimal('20990000.00'),
-                'prevout_spent': 71_000_000,
-                'new_outputs_ex_coinbase': Decimal('49999990.00'),
-                'coinbase': Decimal('50010010.00'),
-                'unspendables': {
-                    'genesis_block': 0,
-                    'bip30': 0,
-                    'scripts': Decimal('20990000.00'),
-                    'unclaimed_rewards': 0,
-                }
-            })
-            self.block_sanity_check(res6['block_info'])
+                res6["block_info"],
+                {
+                    "unspendable": Decimal("20990000.00"),
+                    "prevout_spent": 71_000_000,
+                    "new_outputs_ex_coinbase": Decimal("49999990.00"),
+                    "coinbase": Decimal("50010010.00"),
+                    "unspendables": {
+                        "genesis_block": 0,
+                        "bip30": 0,
+                        "scripts": Decimal("20990000.00"),
+                        "unclaimed_rewards": 0,
+                    },
+                },
+            )
+            self.block_sanity_check(res6["block_info"])

         # Create a coinbase that does not claim full subsidy and also
         # has two outputs
@@ -199,55 +223,63 @@

         # Generate a block that includes previous coinbase
         tip = self.nodes[0].getbestblockhash()
-        block_time = self.nodes[0].getblock(tip)['time'] + 1
+        block_time = self.nodes[0].getblock(tip)["time"] + 1
         block = create_block(int(tip, 16), cb, block_time)
         block.solve()
         self.nodes[0].submitblock(ToHex(block))
         self.sync_all()

-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo, 'muhash'))
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
         for hash_option in index_hash_options:
             res7 = index_node.gettxoutsetinfo(hash_option, 109)
+            assert_equal(res7["total_unspendable_amount"], Decimal("80990000.00"))
             assert_equal(
-                res7['total_unspendable_amount'],
-                Decimal('80990000.00'))
-            assert_equal(res7['block_info'], {
-                'unspendable': 10_000_000,
-                'prevout_spent': 0,
-                'new_outputs_ex_coinbase': 0,
-                'coinbase': 40_000_000,
-                'unspendables': {
-                    'genesis_block': 0,
-                    'bip30': 0,
-                    'scripts': 0,
-                    'unclaimed_rewards': 10_000_000
-                }
-            })
-            self.block_sanity_check(res7['block_info'])
+                res7["block_info"],
+                {
+                    "unspendable": 10_000_000,
+                    "prevout_spent": 0,
+                    "new_outputs_ex_coinbase": 0,
+                    "coinbase": 40_000_000,
+                    "unspendables": {
+                        "genesis_block": 0,
+                        "bip30": 0,
+                        "scripts": 0,
+                        "unclaimed_rewards": 10_000_000,
+                    },
+                },
+            )
+            self.block_sanity_check(res7["block_info"])

         self.log.info("Test that the index is robust across restarts")

-        res8 = index_node.gettxoutsetinfo('muhash')
+        res8 = index_node.gettxoutsetinfo("muhash")
         self.restart_node(1, extra_args=self.extra_args[1])
-        res9 = index_node.gettxoutsetinfo('muhash')
+        res9 = index_node.gettxoutsetinfo("muhash")
         assert_equal(res8, res9)

         self.generate(index_node, 1, sync_fun=self.no_op)
-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo, 'muhash'))
-        res10 = index_node.gettxoutsetinfo('muhash')
-        assert res8['txouts'] < res10['txouts']
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
+        res10 = index_node.gettxoutsetinfo("muhash")
+        assert res8["txouts"] < res10["txouts"]

     def _test_use_index_option(self):
         self.log.info("Test use_index option for nodes running the index")
         self.connect_nodes(0, 1)
         self.nodes[0].waitforblockheight(110)
-        res = self.nodes[0].gettxoutsetinfo('muhash')
+        res = self.nodes[0].gettxoutsetinfo("muhash")
         option_res = self.nodes[1].gettxoutsetinfo(
-            hash_type='muhash', hash_or_height=None, use_index=False)
-        del res['disk_size'], option_res['disk_size']
+            hash_type="muhash", hash_or_height=None, use_index=False
+        )
+        del res["disk_size"], option_res["disk_size"]
         assert_equal(res, option_res)

     def _test_reorg_index(self):
@@ -256,26 +288,31 @@
         # Generate two block, let the index catch up, then invalidate the
         # blocks
         index_node = self.nodes[1]
-        reorg_blocks = self.generatetoaddress(
-            index_node, 2, getnewdestination()[2])
+        reorg_blocks = self.generatetoaddress(index_node, 2, getnewdestination()[2])
         reorg_block = reorg_blocks[1]
-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo, 'muhash'))
-        res_invalid = index_node.gettxoutsetinfo('muhash')
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
+        res_invalid = index_node.gettxoutsetinfo("muhash")
         index_node.invalidateblock(reorg_blocks[0])
-        assert_equal(index_node.gettxoutsetinfo('muhash')['height'], 110)
+        assert_equal(index_node.gettxoutsetinfo("muhash")["height"], 110)

         # Add two new blocks
         block = self.generate(index_node, 2, sync_fun=self.no_op)[1]
-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo, 'muhash'))
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
         res = index_node.gettxoutsetinfo(
-            hash_type='muhash', hash_or_height=None, use_index=False)
+            hash_type="muhash", hash_or_height=None, use_index=False
+        )

         # Test that the result of the reorged block is not returned for its old
         # block height
-        res2 = index_node.gettxoutsetinfo(
-            hash_type='muhash', hash_or_height=112)
+        res2 = index_node.gettxoutsetinfo(hash_type="muhash", hash_or_height=112)
         assert_equal(res["bestblock"], block)
         assert_equal(res["muhash"], res2["muhash"])
         assert res["muhash"] != res_invalid["muhash"]
@@ -283,7 +320,8 @@
         # Test that requesting reorged out block by hash is still returning
         # correct results
         res_invalid2 = index_node.gettxoutsetinfo(
-            hash_type='muhash', hash_or_height=reorg_block)
+            hash_type="muhash", hash_or_height=reorg_block
+        )
         assert_equal(res_invalid2["muhash"], res_invalid["muhash"])
         assert res["muhash"] != res_invalid2["muhash"]

@@ -294,42 +332,63 @@
         # Ensure that removing and re-adding blocks yields consistent results
         block = index_node.getblockhash(99)
         index_node.invalidateblock(block)
-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo, 'muhash'))
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
         index_node.reconsiderblock(block)
-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        index_node.gettxoutsetinfo, 'muhash'))
-        res3 = index_node.gettxoutsetinfo(
-            hash_type='muhash', hash_or_height=112)
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, "muhash"
+            )
+        )
+        res3 = index_node.gettxoutsetinfo(hash_type="muhash", hash_or_height=112)
         assert_equal(res2, res3)

-        self.log.info(
-            "Test that a node aware of stale blocks syncs them as well")
+        self.log.info("Test that a node aware of stale blocks syncs them as well")
         node = self.nodes[0]
         # Ensure the node is aware of a stale block prior to restart
         node.getblock(reorg_block)

         self.restart_node(0, ["-coinstatsindex"])
-        self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
-                        node.gettxoutsetinfo, 'muhash'))
-        assert_raises_rpc_error(-32603, "Unable to read UTXO set",
-                                node.gettxoutsetinfo, 'muhash', reorg_block)
+        self.wait_until(
+            lambda: not try_rpc(
+                -32603, "Unable to read UTXO set", node.gettxoutsetinfo, "muhash"
+            )
+        )
+        assert_raises_rpc_error(
+            -32603,
+            "Unable to read UTXO set",
+            node.gettxoutsetinfo,
+            "muhash",
+            reorg_block,
+        )

     def _test_index_rejects_hash_serialized(self):
         self.log.info(
-            "Test that the rpc raises if the legacy hash is passed with the index")
+            "Test that the rpc raises if the legacy hash is passed with the index"
+        )

         msg = "hash_serialized hash type cannot be queried for a specific block"
-        assert_raises_rpc_error(-8, msg,
-                                self.nodes[1].gettxoutsetinfo,
-                                hash_type='hash_serialized', hash_or_height=111)
+        assert_raises_rpc_error(
+            -8,
+            msg,
+            self.nodes[1].gettxoutsetinfo,
+            hash_type="hash_serialized",
+            hash_or_height=111,
+        )

         for use_index in {True, False, None}:
-            assert_raises_rpc_error(-8, msg,
-                                    self.nodes[1].gettxoutsetinfo,
-                                    hash_type='hash_serialized',
-                                    hash_or_height=111, use_index=use_index)
+            assert_raises_rpc_error(
+                -8,
+                msg,
+                self.nodes[1].gettxoutsetinfo,
+                hash_type="hash_serialized",
+                hash_or_height=111,
+                use_index=use_index,
+            )


-if __name__ == '__main__':
+if __name__ == "__main__":
     CoinStatsIndexTest().main()
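# Side note, not part of the diff above: block_sanity_check() verifies a
# per-block conservation identity. A standalone restatement, checked against
# the figures quoted in the hunks (subsidy 50_000_000, decimals elided):
def balances(prevout_spent, new_outputs_ex_coinbase, coinbase, unspendable):
    block_subsidy = 50_000_000
    # Everything spent plus the subsidy must reappear as new outputs,
    # coinbase outputs, or provably unspendable amounts.
    return prevout_spent + block_subsidy == (
        new_outputs_ex_coinbase + coinbase + unspendable
    )

assert balances(50_000_000, 49_999_700, 50_000_300, 0)  # the res5 block
assert balances(0, 0, 0, 50_000_000)  # the genesis block (res4)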
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -21,156 +21,206 @@

     def test_config_file_parser(self):
         self.stop_node(0)

-        inc_conf_file_path = os.path.join(
-            self.nodes[0].datadir, 'include.conf')
-        with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
-            conf.write(f'includeconf={inc_conf_file_path}\n')
+        inc_conf_file_path = os.path.join(self.nodes[0].datadir, "include.conf")
+        with open(
+            os.path.join(self.nodes[0].datadir, "bitcoin.conf"), "a", encoding="utf-8"
+        ) as conf:
+            conf.write(f"includeconf={inc_conf_file_path}\n")

         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli=1',
-            extra_args=['-dash_cli=1'],
+            expected_msg=(
+                "Error: Error parsing command line arguments: Invalid parameter"
+                " -dash_cli=1"
+            ),
+            extra_args=["-dash_cli=1"],
         )

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('dash_conf=1\n')
-        with self.nodes[0].assert_debug_log(expected_msgs=['Ignoring unknown configuration value dash_conf']):
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("dash_conf=1\n")
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=["Ignoring unknown configuration value dash_conf"]
+        ):
             self.start_node(0)
         self.stop_node(0)

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('-dash=1\n')
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("-dash=1\n")
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
+            expected_msg=(
+                "Error: Error reading configuration file: parse error on line 1:"
+                " -dash=1, options in configuration file must be specified without"
+                " leading -"
+            )
+        )

         if self.is_wallet_compiled():
-            with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
+            with open(inc_conf_file_path, "w", encoding="utf8") as conf:
                 conf.write("wallet=foo\n")
             self.nodes[0].assert_start_raises_init_error(
-                expected_msg=f'Error: Config setting for -wallet only applied on {self.chain} network when in [{self.chain}] section.')
+                expected_msg=(
+                    "Error: Config setting for -wallet only applied on"
+                    f" {self.chain} network when in [{self.chain}] section."
+                )
+            )

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('regtest=0\n')  # mainnet
-            conf.write('acceptnonstdtxn=1\n')
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("regtest=0\n")  # mainnet
+            conf.write("acceptnonstdtxn=1\n")
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
+            expected_msg=(
+                "Error: acceptnonstdtxn is not currently supported for main chain"
+            )
+        )

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('nono\n')
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("nono\n")
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
+            expected_msg=(
+                "Error: Error reading configuration file: parse error on line 1: nono,"
+                " if you intended to specify a negated option, use nono=1 instead"
+            )
+        )

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("server=1\nrpcuser=someuser\nrpcpassword=some#pass")
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
+            expected_msg=(
+                "Error: Error reading configuration file: parse error on line 3, using"
+                " # in rpcpassword can be ambiguous and should be avoided"
+            )
+        )

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass')
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass")
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
+            expected_msg=(
+                "Error: Error reading configuration file: parse error on line 3, using"
+                " # in rpcpassword can be ambiguous and should be avoided"
+            )
+        )

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write(
-                'server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass')
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass")
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: Error reading configuration file: parse error on line 4, using # in rpcpassword can be ambiguous and should be avoided')
+            expected_msg=(
+                "Error: Error reading configuration file: parse error on line 4, using"
+                " # in rpcpassword can be ambiguous and should be avoided"
+            )
+        )

-        inc_conf_file2_path = os.path.join(
-            self.nodes[0].datadir, 'include2.conf')
-        with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
-            conf.write(f'includeconf={inc_conf_file2_path}\n')
+        inc_conf_file2_path = os.path.join(self.nodes[0].datadir, "include2.conf")
+        with open(
+            os.path.join(self.nodes[0].datadir, "bitcoin.conf"), "a", encoding="utf-8"
+        ) as conf:
+            conf.write(f"includeconf={inc_conf_file2_path}\n")

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('testnot.datadir=1\n')
-        with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
-            conf.write('[testnet]\n')
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("testnot.datadir=1\n")
+        with open(inc_conf_file2_path, "w", encoding="utf-8") as conf:
+            conf.write("[testnet]\n")
         self.restart_node(0)
         self.nodes[0].stop_node(
-            expected_stderr='Warning: ' +
-            inc_conf_file_path +
-            ':1 Section [testnot] is not recognized.' +
-            os.linesep +
-            inc_conf_file2_path +
-            ':1 Section [testnet] is not recognized.')
+            expected_stderr="Warning: "
+            + inc_conf_file_path
+            + ":1 Section [testnot] is not recognized."
+            + os.linesep
+            + inc_conf_file2_path
+            + ":1 Section [testnet] is not recognized."
+        )

-        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
-            conf.write('')  # clear
-        with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
-            conf.write('')  # clear
+        with open(inc_conf_file_path, "w", encoding="utf-8") as conf:
+            conf.write("")  # clear
+        with open(inc_conf_file2_path, "w", encoding="utf-8") as conf:
+            conf.write("")  # clear

     def test_invalid_command_line_options(self):
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg='Error: No proxy server specified. Use -proxy=<ip:port> or '
-            '-proxy=<ip:port>.',
-            extra_args=['-proxy'],
+            expected_msg=(
+                "Error: No proxy server specified. Use -proxy=<ip:port> or"
+                " -proxy=<ip:port>."
+            ),
+            extra_args=["-proxy"],
         )

     def test_log_buffer(self):
         self.stop_node(0)
-        with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0\n']):
-            self.start_node(0, extra_args=['-noconnect=0'])
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[
+                "Warning: parsed potentially confusing double-negative -connect=0\n"
+            ]
+        ):
+            self.start_node(0, extra_args=["-noconnect=0"])

     def test_args_log(self):
         self.stop_node(0)
-        self.log.info('Test config args logging')
+        self.log.info("Test config args logging")
         with self.nodes[0].assert_debug_log(
-                expected_msgs=[
-                    'Command-line arg: addnode="some.node"',
-                    'Command-line arg: rpcauth=****',
-                    'Command-line arg: rpcbind=****',
-                    'Command-line arg: rpcpassword=****',
-                    'Command-line arg: rpcuser=****',
-                    'Command-line arg: torpassword=****',
-                    f'Config file arg: {self.chain}="1"',
-                    f'Config file arg: [{self.chain}] server="1"',
+            expected_msgs=[
+                'Command-line arg: addnode="some.node"',
+                "Command-line arg: rpcauth=****",
+                "Command-line arg: rpcbind=****",
+                "Command-line arg: rpcpassword=****",
+                "Command-line arg: rpcuser=****",
+                "Command-line arg: torpassword=****",
+                f'Config file arg: {self.chain}="1"',
+                f'Config file arg: [{self.chain}] server="1"',
+            ],
+            unexpected_msgs=[
+                "alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0",
+                "127.1.1.1",
+                "secret-rpcuser",
+                "secret-torpassword",
+            ],
+        ):
+            self.start_node(
+                0,
+                extra_args=[
+                    "-addnode=some.node",
+                    "-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0",
+                    "-rpcbind=127.1.1.1",
+                    "-rpcpassword=",
+                    "-rpcuser=secret-rpcuser",
+                    "-torpassword=secret-torpassword",
                 ],
-                unexpected_msgs=[
-                    'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
-                    '127.1.1.1',
-                    'secret-rpcuser',
-                    'secret-torpassword',
-                ]):
-            self.start_node(0, extra_args=[
-                '-addnode=some.node',
-                '-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
-                '-rpcbind=127.1.1.1',
-                '-rpcpassword=',
-                '-rpcuser=secret-rpcuser',
-                '-torpassword=secret-torpassword',
-            ])
+            )

     def test_networkactive(self):
-        self.log.info('Test -networkactive option')
+        self.log.info("Test -networkactive option")
         self.stop_node(0)
-        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
+        with self.nodes[0].assert_debug_log(expected_msgs=["SetNetworkActive: true\n"]):
             self.start_node(0)

         self.stop_node(0)
-        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
-            self.start_node(0, extra_args=['-networkactive'])
+        with self.nodes[0].assert_debug_log(expected_msgs=["SetNetworkActive: true\n"]):
+            self.start_node(0, extra_args=["-networkactive"])

         self.stop_node(0)
-        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
-            self.start_node(0, extra_args=['-networkactive=1'])
+        with self.nodes[0].assert_debug_log(expected_msgs=["SetNetworkActive: true\n"]):
+            self.start_node(0, extra_args=["-networkactive=1"])

         self.stop_node(0)
-        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
-            self.start_node(0, extra_args=['-networkactive=0'])
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=["SetNetworkActive: false\n"]
+        ):
+            self.start_node(0, extra_args=["-networkactive=0"])

         self.stop_node(0)
-        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
-            self.start_node(0, extra_args=['-nonetworkactive'])
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=["SetNetworkActive: false\n"]
+        ):
+            self.start_node(0, extra_args=["-nonetworkactive"])

         self.stop_node(0)
-        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
-            self.start_node(0, extra_args=['-nonetworkactive=1'])
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=["SetNetworkActive: false\n"]
+        ):
+            self.start_node(0, extra_args=["-nonetworkactive=1"])

     def test_seed_peers(self):
-        self.log.info('Test seed peers')
+        self.log.info("Test seed peers")
         default_data_dir = self.nodes[0].datadir
         # Only regtest has no fixed seeds. To avoid connections to random
         # nodes, regtest is the only network where it is safe to enable
         # -fixedseeds in tests
-        assert_equal(self.nodes[0].getblockchaininfo()['chain'], 'regtest')
+        assert_equal(self.nodes[0].getblockchaininfo()["chain"], "regtest")
         self.stop_node(0)

         # No peers.dat exists and -dnsseed=1
@@ -179,19 +229,21 @@
         # a slow test)
         assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
         start = int(time.time())
-        with self.nodes[0].assert_debug_log(expected_msgs=[
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[
                 "Loaded 0 addresses from peers.dat",
                 "0 addresses found from DNS seeds",
-        ], timeout=10):
+            ],
+            timeout=10,
+        ):
             self.start_node(
-                0,
-                extra_args=[
-                    '-dnsseed=1',
-                    '-fixedseeds=1',
-                    f'-mocktime={start}'])
-        with self.nodes[0].assert_debug_log(expected_msgs=[
+                0, extra_args=["-dnsseed=1", "-fixedseeds=1", f"-mocktime={start}"]
+            )
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[
                 "Adding fixed seeds as 60 seconds have passed and addrman is empty",
-        ]):
+            ]
+        ):
             self.nodes[0].setmocktime(start + 65)
         self.stop_node(0)

@@ -199,12 +251,18 @@
         # We expect the node will fallback immediately to fixed seeds
         assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
         start = time.time()
-        with self.nodes[0].assert_debug_log(expected_msgs=[
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[
                 "Loaded 0 addresses from peers.dat",
                 "DNS seeding disabled",
-                "Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n",
-        ], timeout=10):
-            self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1'])
+                (
+                    "Adding fixed seeds as -dnsseed=0, -addnode is not provided and all"
+                    " -seednode(s) attempted\n"
+                ),
+            ],
+            timeout=10,
+        ):
+            self.start_node(0, extra_args=["-dnsseed=0", "-fixedseeds=1"])
         assert time.time() - start < 60
         self.stop_node(0)

@@ -212,12 +270,15 @@
         # We expect the node will not add fixed seeds when explicitly disabled.
         assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
         start = time.time()
-        with self.nodes[0].assert_debug_log(expected_msgs=[
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[
                 "Loaded 0 addresses from peers.dat",
                 "DNS seeding disabled",
                 "Fixed seeds are disabled",
-        ], timeout=10):
-            self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=0'])
+            ],
+            timeout=10,
+        ):
+            self.start_node(0, extra_args=["-dnsseed=0", "-fixedseeds=0"])
         assert time.time() - start < 60
         self.stop_node(0)

@@ -225,20 +286,27 @@
         # We expect the node will allow 60 seconds prior to using fixed seeds
         assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
         start = int(time.time())
-        with self.nodes[0].assert_debug_log(expected_msgs=[
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[
                 "Loaded 0 addresses from peers.dat",
                 "DNS seeding disabled",
-        ], timeout=10):
+            ],
+            timeout=10,
+        ):
             self.start_node(
                 0,
                 extra_args=[
-                    '-dnsseed=0',
-                    '-fixedseeds=1',
-                    '-addnode=fakenodeaddr',
-                    f'-mocktime={start}'])
-        with self.nodes[0].assert_debug_log(expected_msgs=[
+                    "-dnsseed=0",
+                    "-fixedseeds=1",
+                    "-addnode=fakenodeaddr",
+                    f"-mocktime={start}",
+                ],
+            )
+        with self.nodes[0].assert_debug_log(
+            expected_msgs=[
                 "Adding fixed seeds as 60 seconds have passed and addrman is empty",
-        ]):
+            ]
+        ):
             self.nodes[0].setmocktime(start + 65)

     def run_test(self):
@@ -254,55 +322,55 @@
         self.nodes[0].remove_default_args(["-datadir"])

         default_data_dir = self.nodes[0].datadir
-        new_data_dir = os.path.join(default_data_dir, 'newdatadir')
-        new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
+        new_data_dir = os.path.join(default_data_dir, "newdatadir")
+        new_data_dir_2 = os.path.join(default_data_dir, "newdatadir2")

         # Check that using -datadir argument on non-existent directory fails
         self.nodes[0].datadir = new_data_dir
         self.nodes[0].assert_start_raises_init_error(
-            [f'-datadir={new_data_dir}'],
-            f'Error: Specified data directory "{new_data_dir}" does not exist.')
+            [f"-datadir={new_data_dir}"],
+            f'Error: Specified data directory "{new_data_dir}" does not exist.',
+        )

         # Check that using non-existent datadir in conf file fails
         conf_file = os.path.join(default_data_dir, "bitcoin.conf")

         # datadir needs to be set before [chain] section
-        conf_file_contents = open(conf_file, encoding='utf8').read()
-        with open(conf_file, 'w', encoding='utf8') as f:
+        conf_file_contents = open(conf_file, encoding="utf8").read()
+        with open(conf_file, "w", encoding="utf8") as f:
             f.write(f"datadir={new_data_dir}\n")
             f.write(conf_file_contents)

         self.nodes[0].assert_start_raises_init_error(
-            [f'-conf={conf_file}'],
-            f'Error: Error reading configuration file: specified data directory "{new_data_dir}" does not exist.')
+            [f"-conf={conf_file}"],
+            (
+                "Error: Error reading configuration file: specified data directory"
+                f' "{new_data_dir}" does not exist.'
+            ),
+        )

         # Create the directory and ensure the config file now works
         os.mkdir(new_data_dir)
-        self.start_node(0, [f'-conf={conf_file}', '-wallet=w1'])
+        self.start_node(0, [f"-conf={conf_file}", "-wallet=w1"])
         self.stop_node(0)
-        assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
+        assert os.path.exists(os.path.join(new_data_dir, self.chain, "blocks"))
         if self.is_wallet_compiled():
-            assert os.path.exists(os.path.join(
-                new_data_dir, self.chain, 'wallets', 'w1'))
+            assert os.path.exists(
+                os.path.join(new_data_dir, self.chain, "wallets", "w1")
+            )

         # Ensure command line argument overrides datadir in conf
         os.mkdir(new_data_dir_2)
         self.nodes[0].datadir = new_data_dir_2
-        self.start_node(0, [f'-datadir={new_data_dir_2}',
-                        f'-conf={conf_file}', '-wallet=w2'])
-        assert os.path.exists(
-            os.path.join(
-                new_data_dir_2,
-                self.chain,
-                'blocks'))
+        self.start_node(
+            0, [f"-datadir={new_data_dir_2}", f"-conf={conf_file}", "-wallet=w2"]
+        )
+        assert os.path.exists(os.path.join(new_data_dir_2, self.chain, "blocks"))
         if self.is_wallet_compiled():
             assert os.path.exists(
-                os.path.join(
-                    new_data_dir_2,
-                    self.chain,
-                    'wallets',
-                    'w2'))
+                os.path.join(new_data_dir_2, self.chain, "wallets", "w2")
+            )


-if __name__ == '__main__':
+if __name__ == "__main__":
     ConfArgsTest().main()
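# Side note, not part of the diff above: a toy illustration of the bitcoin.conf
# parsing rules this test provokes -- not the real parser, just the three
# failure modes exercised above, restated in plain Python.
def check_conf_line(num, line):
    if line.startswith("-"):
        return (
            f"parse error on line {num}: {line}, options in configuration"
            " file must be specified without leading -"
        )
    if line and "=" not in line and not line.startswith("["):
        return (
            f"parse error on line {num}: {line}, if you intended to specify"
            f" a negated option, use {line}=1 instead"
        )
    key, _, value = line.partition("=")
    if key.endswith("rpcpassword") and "#" in value:
        return (
            f"parse error on line {num}, using # in rpcpassword can be"
            " ambiguous and should be avoided"
        )
    return None

assert check_conf_line(1, "-dash=1").startswith("parse error on line 1")
assert check_conf_line(1, "nono").endswith("use nono=1 instead")
assert check_conf_line(3, "rpcpassword=some#pass") is not None
assert check_conf_line(2, "server=1") is None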
node, bip68inputs[i], address, amount=Decimal("49980000") + ) tx.nVersion = txversion tx.vin[0].nSequence = locktime + locktime_delta tx = sign_transaction(node, tx) tx.rehash() - txs.append({'tx': tx, 'sdf': sdf, 'stf': stf}) + txs.append({"tx": tx, "sdf": sdf, "stf": stf}) return txs -def create_bip112txs(node, bip112inputs, varyOP_CSV, - txversion, address, locktime_delta=0): +def create_bip112txs( + node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0 +): """Returns a list of bip112 transactions with different bits set.""" txs = [] assert len(bip112inputs) >= 16 for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): locktime = relative_locktime(sdf, srhb, stf, srlb) tx = create_transaction( - node, bip112inputs[i], address, amount=Decimal("49980000")) - if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed + node, bip112inputs[i], address, amount=Decimal("49980000") + ) + if varyOP_CSV: # if varying OP_CSV, nSequence is fixed tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta else: # vary nSequence instead, OP_CSV is fixed tx.vin[0].nSequence = locktime + locktime_delta tx.nVersion = txversion - if (varyOP_CSV): + if varyOP_CSV: tx.vout[0].scriptPubKey = CScript( - [locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) + [locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE] + ) else: tx.vout[0].scriptPubKey = CScript( - [BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) + [BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE] + ) tx.rehash() signtx = sign_transaction(node, tx) signtx.rehash() - txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf}) + txs.append({"tx": signtx, "sdf": sdf, "stf": stf}) return txs @@ -177,7 +192,7 @@ def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True - self.extra_args = [['-whitelist=noban@127.0.0.1']] + self.extra_args = [["-whitelist=noban@127.0.0.1"]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() @@ -193,8 +208,9 @@ return test_blocks def create_test_block(self, txs, version=536870912): - block = create_block(self.tip, create_coinbase( - self.tipheight + 1), self.last_block_time + 600) + block = create_block( + self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600 + ) block.nVersion = version block.vtx.extend(txs) make_conform_to_ctor(block) @@ -219,9 +235,9 @@ def send_blocks(self, blocks, success=True): """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. - Call with success = False if the tip shouldn't advance to the most recent block.""" - self.helper_peer.send_blocks_and_test( - blocks, self.nodes[0], success=success) + Call with success = False if the tip shouldn't advance to the most recent block. 
+ """ + self.helper_peer.send_blocks_and_test(blocks, self.nodes[0], success=success) def run_test(self): self.helper_peer = self.nodes[0].add_p2p_connection(P2PDataStore()) @@ -234,8 +250,7 @@ # long_past_time self.nodes[0].setmocktime(long_past_time - 100) # 82 blocks generated for inputs - self.coinbase_blocks = self.generate( - self.nodes[0], 1 + 16 + 2 * 32 + 1) + self.coinbase_blocks = self.generate(self.nodes[0], 1 + 16 + 2 * 32 + 1) # Set time back to present so yielded blocks aren't in the future as # we advance last_block_time self.nodes[0].setmocktime(0) @@ -263,8 +278,11 @@ # 16 normal inputs bip68inputs = [] for _ in range(16): - bip68inputs.append(send_generic_input_tx( - self.nodes[0], self.coinbase_blocks, self.nodeaddress)) + bip68inputs.append( + send_generic_input_tx( + self.nodes[0], self.coinbase_blocks, self.nodeaddress + ) + ) # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be # prepended to spending scriptSig) @@ -272,8 +290,11 @@ for _ in range(2): inputs = [] for _ in range(16): - inputs.append(send_generic_input_tx( - self.nodes[0], self.coinbase_blocks, self.nodeaddress)) + inputs.append( + send_generic_input_tx( + self.nodes[0], self.coinbase_blocks, self.nodeaddress + ) + ) bip112basicinputs.append(inputs) # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP @@ -282,18 +303,23 @@ for _ in range(2): inputs = [] for _ in range(16): - inputs.append(send_generic_input_tx( - self.nodes[0], self.coinbase_blocks, self.nodeaddress)) + inputs.append( + send_generic_input_tx( + self.nodes[0], self.coinbase_blocks, self.nodeaddress + ) + ) bip112diverseinputs.append(inputs) # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to # spending scriptSig) bip112specialinput = send_generic_input_tx( - self.nodes[0], self.coinbase_blocks, self.nodeaddress) + self.nodes[0], self.coinbase_blocks, self.nodeaddress + ) # 1 normal input bip113input = send_generic_input_tx( - self.nodes[0], self.coinbase_blocks, self.nodeaddress) + self.nodes[0], self.coinbase_blocks, self.nodeaddress + ) self.nodes[0].setmocktime(self.last_block_time + 600) # 1 block generated for inputs to be in chain at height 572 @@ -302,8 +328,7 @@ self.tip = int(inputblockhash, 16) self.tipheight += 1 self.last_block_time += 600 - assert_equal(len(self.nodes[0].getblock( - inputblockhash, True)["tx"]), 82 + 1) + assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1) # 2 more version 4 blocks test_blocks = self.generate_blocks(2) @@ -311,55 +336,66 @@ self.send_blocks(test_blocks) self.log.info( - "Not yet activated, height = 574 (will activate for block 576, not 575)") + "Not yet activated, height = 574 (will activate for block 576, not 575)" + ) assert_equal(get_csv_status(self.nodes[0]), False) # Test both version 1 and version 2 transactions for all tests # BIP113 test transaction will be modified before each use to # put in appropriate block time bip113tx_v1 = create_transaction( - self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49980000")) + self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49980000") + ) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE bip113tx_v1.nVersion = 1 bip113tx_v2 = create_transaction( - self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49980000")) + self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49980000") + ) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 # For BIP68 test all 16 relative sequence locktimes - bip68txs_v1 = create_bip68txs( - 
self.nodes[0], bip68inputs, 1, self.nodeaddress) - bip68txs_v2 = create_bip68txs( - self.nodes[0], bip68inputs, 2, self.nodeaddress) + bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress) + bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress) # For BIP112 test: # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_v1 = create_bip112txs( - self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress) + self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress + ) bip112txs_vary_nSequence_v2 = create_bip112txs( - self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress) + self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress + ) # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_9_v1 = create_bip112txs( - self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1) + self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1 + ) bip112txs_vary_nSequence_9_v2 = create_bip112txs( - self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1) + self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1 + ) # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV # OP_DROP inputs bip112txs_vary_OP_CSV_v1 = create_bip112txs( - self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress) + self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress + ) bip112txs_vary_OP_CSV_v2 = create_bip112txs( - self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress) + self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress + ) # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV # OP_DROP inputs bip112txs_vary_OP_CSV_9_v1 = create_bip112txs( - self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1) + self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1 + ) bip112txs_vary_OP_CSV_9_v2 = create_bip112txs( - self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1) + self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1 + ) # -1 OP_CSV OP_DROP input bip112tx_special_v1 = create_bip112special( - self.nodes[0], bip112specialinput, 1, self.nodeaddress) + self.nodes[0], bip112specialinput, 1, self.nodeaddress + ) bip112tx_special_v2 = create_bip112special( - self.nodes[0], bip112specialinput, 2, self.nodeaddress) + self.nodes[0], bip112specialinput, 2, self.nodeaddress + ) self.log.info("TESTING") @@ -374,23 +410,40 @@ success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) success_txs.append( - spend_tx(self.nodes[0], bip112tx_special_v1, self.nodeaddress)) + spend_tx(self.nodes[0], bip112tx_special_v1, self.nodeaddress) + ) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v1)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_nSequence_v1)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_nSequence_v1) + ] + ) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v1)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v1) + ] + ) # try BIP 112 with seq=9 txs 
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v1)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v1) + ] + ) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v1) + ] + ) # Test #3 self.send_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) @@ -405,23 +458,40 @@ success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) success_txs.append( - spend_tx(self.nodes[0], bip112tx_special_v2, self.nodeaddress)) + spend_tx(self.nodes[0], bip112tx_special_v2, self.nodeaddress) + ) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v2)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_nSequence_v2)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_nSequence_v2) + ] + ) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v2)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v2) + ] + ) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v2)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v2) + ] + ) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) - success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) - for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)]) + success_txs.extend( + [ + spend_tx(self.nodes[0], tx, self.nodeaddress) + for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v2) + ] + ) # Test #4 self.send_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) @@ -450,8 +520,7 @@ bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: # Test #6, Test #7 - self.send_blocks( - [self.create_test_block([bip113tx])], success=False) + self.send_blocks([self.create_test_block([bip113tx])], success=False) # BIP 113 tests should now pass if the locktime is < MTP # < MTP of prior block @@ -482,20 +551,20 @@ self.log.info("Test version 2 txs") # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass - bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']] + bip68success_txs = [tx["tx"] for tx in bip68txs_v2 if tx["sdf"]] self.send_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # All txs without flag fail as we are at delta height = 8 < 10 and # delta time = 8 * 600 < 10 * 512 - bip68timetxs = [tx['tx'] - for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']] + bip68timetxs = [tx["tx"] for tx in bip68txs_v2 if not tx["sdf"] and tx["stf"]] 
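        # Aside: a minimal sketch of the BIP68 nSequence bit layout behind the
        # 'sdf' / 'stf' dictionary keys filtered on above. The constant names
        # follow BIP68; they are spelled out here for illustration, not
        # imported from this test framework.
        SEQUENCE_LOCKTIME_DISABLE_FLAG = 1 << 31  # 'sdf': set => BIP68 not enforced
        SEQUENCE_LOCKTIME_TYPE_FLAG = 1 << 22  # 'stf': set => lock counted in 512s units
        SEQUENCE_LOCKTIME_MASK = 0x0000FFFF  # low 16 bits carry the lock value
        # Why the time-based txs still fail at this point: a lock of 10 time
        # units needs 10 * 512 = 5120 seconds, but 8 blocks at 600-second
        # spacing cover only 8 * 600 = 4800 seconds, matching the comment above.
        assert 8 * 600 < 10 * 512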
for tx in bip68timetxs: # Test #13 - Test #16 self.send_blocks([self.create_test_block([tx])], success=False) - bip68heighttxs = [tx['tx'] - for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']] + bip68heighttxs = [ + tx["tx"] for tx in bip68txs_v2 if not tx["sdf"] and not tx["stf"] + ] for tx in bip68heighttxs: # Test #17 - Test #20 self.send_blocks([self.create_test_block([tx])], success=False) @@ -503,7 +572,9 @@ # Advance one block to 581 test_blocks = self.generate_blocks(1) # Test #21 - self.send_blocks(test_blocks,) + self.send_blocks( + test_blocks, + ) # Height txs should fail and time txs should now pass 9 * 600 > 10 * # 512 @@ -531,50 +602,51 @@ # -1 OP_CSV tx should fail # Test #29 - self.send_blocks([self.create_test_block_spend_utxos( - self.nodes[0], [bip112tx_special_v1])], success=False) + self.send_blocks( + [self.create_test_block_spend_utxos(self.nodes[0], [bip112tx_special_v1])], + success=False, + ) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 1 txs should still pass - success_txs = [tx['tx'] - for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']] - success_txs += [tx['tx'] - for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']] + success_txs = [tx["tx"] for tx in bip112txs_vary_OP_CSV_v1 if tx["sdf"]] + success_txs += [tx["tx"] for tx in bip112txs_vary_OP_CSV_9_v1 if tx["sdf"]] # Test #30 self.send_blocks( - [self.create_test_block_spend_utxos(self.nodes[0], success_txs)]) + [self.create_test_block_spend_utxos(self.nodes[0], success_txs)] + ) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, # version 1 txs should now fail fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1) fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1) - fail_txs += [tx['tx'] - for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']] - fail_txs += [tx['tx'] - for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']] + fail_txs += [tx["tx"] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx["sdf"]] + fail_txs += [tx["tx"] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx["sdf"]] for tx in fail_txs: # Test #31 - Test #78 - self.send_blocks([self.create_test_block_spend_utxos( - self.nodes[0], [tx])], success=False) + self.send_blocks( + [self.create_test_block_spend_utxos(self.nodes[0], [tx])], success=False + ) self.log.info("Test version 2 txs") # -1 OP_CSV tx should fail # Test #79 - self.send_blocks([self.create_test_block_spend_utxos( - self.nodes[0], [bip112tx_special_v2])], success=False) + self.send_blocks( + [self.create_test_block_spend_utxos(self.nodes[0], [bip112tx_special_v2])], + success=False, + ) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 2 txs should pass - success_txs = [tx['tx'] - for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']] - success_txs += [tx['tx'] - for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']] + success_txs = [tx["tx"] for tx in bip112txs_vary_OP_CSV_v2 if tx["sdf"]] + success_txs += [tx["tx"] for tx in bip112txs_vary_OP_CSV_9_v2 if tx["sdf"]] # Test #80 self.send_blocks( - [self.create_test_block_spend_utxos(self.nodes[0], success_txs)]) + [self.create_test_block_spend_utxos(self.nodes[0], success_txs)] + ) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all @@ -583,36 +655,47 @@ # All txs with nSequence 9 should fail either due to earlier mismatch # or failing the CSV check fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2) - fail_txs += 
[tx['tx'] - for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']] + fail_txs += [tx["tx"] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx["sdf"]] for tx in fail_txs: # Test #81 - Test #104 - self.send_blocks([self.create_test_block_spend_utxos( - self.nodes[0], [tx])], success=False) + self.send_blocks( + [self.create_test_block_spend_utxos(self.nodes[0], [tx])], success=False + ) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail - fail_txs = [tx['tx'] - for tx in bip112txs_vary_nSequence_v2 if tx['sdf']] + fail_txs = [tx["tx"] for tx in bip112txs_vary_nSequence_v2 if tx["sdf"]] for tx in fail_txs: # Test #105 - Test #112 - self.send_blocks([self.create_test_block_spend_utxos( - self.nodes[0], [tx])], success=False) + self.send_blocks( + [self.create_test_block_spend_utxos(self.nodes[0], [tx])], success=False + ) # If sequencelock types mismatch, tx should fail - fail_txs = [tx['tx'] - for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']] - fail_txs += [tx['tx'] - for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']] + fail_txs = [ + tx["tx"] + for tx in bip112txs_vary_nSequence_v2 + if not tx["sdf"] and tx["stf"] + ] + fail_txs += [ + tx["tx"] for tx in bip112txs_vary_OP_CSV_v2 if not tx["sdf"] and tx["stf"] + ] for tx in fail_txs: # Test #113 - Test #120 - self.send_blocks([self.create_test_block_spend_utxos( - self.nodes[0], [tx])], success=False) + self.send_blocks( + [self.create_test_block_spend_utxos(self.nodes[0], [tx])], success=False + ) # Remaining txs should pass, just test masking works properly success_txs = [ - tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']] - success_txs += [tx['tx'] - for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']] + tx["tx"] + for tx in bip112txs_vary_nSequence_v2 + if not tx["sdf"] and not tx["stf"] + ] + success_txs += [ + tx["tx"] + for tx in bip112txs_vary_OP_CSV_v2 + if not tx["sdf"] and not tx["stf"] + ] # Test #121 self.send_blocks([self.create_test_block(success_txs)]) @@ -638,8 +721,9 @@ # Additional test, of checking that comparison of two time types works # properly time_txs = [] - for tx in [tx['tx'] - for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]: + for tx in [ + tx["tx"] for tx in bip112txs_vary_OP_CSV_v2 if not tx["sdf"] and tx["stf"] + ]: signtx = sign_transaction(self.nodes[0], tx) time_txs.append(signtx) @@ -669,5 +753,5 @@ # TODO: Test empty stack fails -if __name__ == '__main__': +if __name__ == "__main__": BIP68_112_113Test().main() diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -46,9 +46,13 @@ # Set -maxmempool=0 to turn off mempool memory sharing with dbcache # Set -rpcservertimeout=900 to reduce socket disconnects in this # long-running test - self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", - "-rpcservertimeout=900", "-dbbatchsize=200000", - "-noparkdeepreorg"] + self.base_args = [ + "-limitdescendantsize=0", + "-maxmempool=0", + "-rpcservertimeout=900", + "-dbbatchsize=200000", + "-noparkdeepreorg", + ] # Set different crash ratios and cache sizes. Note that not all of # -dbcache goes to the in-memory coins cache. @@ -60,9 +64,14 @@ # and non-standard txs (e.g. 
txs with "dust" outputs) self.node3_args = [ f"-blockmaxsize={DEFAULT_MAX_BLOCK_SIZE}", - "-acceptnonstdtxn"] - self.extra_args = [self.node0_args, self.node1_args, - self.node2_args, self.node3_args] + "-acceptnonstdtxn", + ] + self.extra_args = [ + self.node0_args, + self.node1_args, + self.node2_args, + self.node3_args, + ] def skip_test_if_missing_module(self): self.skip_if_no_wallet() @@ -85,8 +94,7 @@ # Any of these RPC calls could throw due to node crash self.start_node(node_index) self.nodes[node_index].waitforblock(expected_tip) - utxo_hash = self.nodes[node_index].gettxoutsetinfo()[ - 'hash_serialized'] + utxo_hash = self.nodes[node_index].gettxoutsetinfo()["hash_serialized"] return utxo_hash except Exception: # An exception here should mean the node is about to crash. @@ -102,7 +110,8 @@ # TODO: If this happens a lot, we should try to restart without -dbcrashratio # and make sure that recovery happens. raise AssertionError( - f"Unable to successfully restart node {node_index} in allotted time") + f"Unable to successfully restart node {node_index} in allotted time" + ) def submit_block_catch_error(self, node_index, block): """Try submitting a block to the given node. @@ -114,12 +123,13 @@ self.nodes[node_index].submitblock(block) return True except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e: - self.log.debug( - f"node {node_index} submitblock raised exception: {e}") + self.log.debug(f"node {node_index} submitblock raised exception: {e}") return False except OSError as e: self.log.debug( - f"node {node_index} submitblock raised OSError exception: errno={e.errno}") + f"node {node_index} submitblock raised OSError exception:" + f" errno={e.errno}" + ) if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]: # The node has likely crashed return False @@ -134,27 +144,25 @@ If any nodes crash while updating, we'll compare utxo hashes to ensure recovery was successful.""" - node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized'] + node3_utxo_hash = self.nodes[3].gettxoutsetinfo()["hash_serialized"] # Retrieve all the blocks from node3 blocks = [] for block_hash in block_hashes: - blocks.append( - [block_hash, self.nodes[3].getblock(block_hash, False)]) + blocks.append([block_hash, self.nodes[3].getblock(block_hash, False)]) # Deliver each block to each other node for i in range(3): nodei_utxo_hash = None self.log.debug(f"Syncing blocks to node {i}") - for (block_hash, block) in blocks: + for block_hash, block in blocks: # Get the block from node3, and submit to node_i self.log.debug(f"submitting block {block_hash}") if not self.submit_block_catch_error(i, block): # TODO: more carefully check that the crash is due to -dbcrashratio # (change the exit code perhaps, and check that here?) self.wait_for_node_exit(i, timeout=30) - self.log.debug( - f"Restarting node {i} after block hash {block_hash}") + self.log.debug(f"Restarting node {i} after block hash {block_hash}") nodei_utxo_hash = self.restart_node(i, block_hash) assert nodei_utxo_hash is not None self.restart_counts[i] += 1 @@ -171,25 +179,22 @@ # - we only update the utxo cache after a node restart, since flushing # the cache is a no-op at that point if nodei_utxo_hash is not None: - self.log.debug( - f"Checking txoutsetinfo matches for node {i}") + self.log.debug(f"Checking txoutsetinfo matches for node {i}") assert_equal(nodei_utxo_hash, node3_utxo_hash) def verify_utxo_hash(self): """Verify that the utxo hash of each node matches node3. 
Restart any nodes that crash while querying.""" - node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized'] + node3_utxo_hash = self.nodes[3].gettxoutsetinfo()["hash_serialized"] self.log.info("Verifying utxo hash matches for all nodes") for i in range(3): try: - nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()[ - 'hash_serialized'] + nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()["hash_serialized"] except OSError: # probably a crash on db flushing - nodei_utxo_hash = self.restart_node( - i, self.nodes[3].getbestblockhash()) + nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash()) assert_equal(nodei_utxo_hash, node3_utxo_hash) def generate_small_transactions(self, node, count, utxo_list): @@ -201,9 +206,8 @@ input_amount = 0 for _ in range(2): utxo = utxo_list.pop() - tx.vin.append( - CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) - input_amount += int(utxo['amount'] * XEC) + tx.vin.append(CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]))) + input_amount += int(utxo["amount"] * XEC) output_amount = (input_amount - FEE) // 3 if output_amount <= 0: @@ -212,32 +216,32 @@ for _ in range(3): tx.vout.append( - CTxOut(output_amount, bytes.fromhex(utxo['scriptPubKey']))) + CTxOut(output_amount, bytes.fromhex(utxo["scriptPubKey"])) + ) # Sign and send the transaction to get into the mempool - tx_signed_hex = node.signrawtransactionwithwallet(ToHex(tx))['hex'] + tx_signed_hex = node.signrawtransactionwithwallet(ToHex(tx))["hex"] node.sendrawtransaction(tx_signed_hex) num_transactions += 1 def run_test(self): # Track test coverage statistics self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2 - self.crashed_on_restart = 0 # Track count of crashes during recovery + self.crashed_on_restart = 0 # Track count of crashes during recovery # Start by creating a lot of utxos on node3 initial_height = self.nodes[3].getblockcount() utxo_list = create_confirmed_utxos( - self, self.nodes[3], 5000, sync_fun=self.no_op) + self, self.nodes[3], 5000, sync_fun=self.no_op + ) self.log.info(f"Prepped {len(utxo_list)} utxo entries") # Sync these blocks with the other nodes block_hashes_to_sync = [] - for height in range(initial_height + 1, - self.nodes[3].getblockcount() + 1): + for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) - self.log.debug( - f"Syncing {len(block_hashes_to_sync)} blocks with other nodes") + self.log.debug(f"Syncing {len(block_hashes_to_sync)} blocks with other nodes") # Syncing the blocks could cause nodes to crash, so the test begins # here. self.sync_node3blocks(block_hashes_to_sync) @@ -250,7 +254,7 @@ # Note that the current time can be behind the block time due to the # way the miner sets the block time. 
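        # Aside: shape of the randomized reorgs in the main loop a few lines
        # below, restated as a stand-alone sketch (names are illustrative).
        # One height h in (starting_tip, tip] is drawn uniformly, then
        # invalidated with probability 1 / (tip + 4 - h): the tip reorgs with
        # probability 1/4, tip-1 with 1/5, and deeper reorgs grow
        # progressively rarer.
        import random

        def pick_reorg_depth(starting_tip_height, current_height):
            h = random.randint(starting_tip_height, current_height)
            if h > starting_tip_height:
                if random.random() < 1.0 / (current_height + 4 - h):
                    return current_height - h  # depth of the reorg to trigger
            return None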
tip = self.nodes[3].getbestblockhash() - block_time = self.nodes[3].getblockheader(tip)['time'] + block_time = self.nodes[3].getblockheader(tip)["time"] self.nodes[3].setmocktime(block_time) # Main test loop: @@ -262,22 +266,24 @@ self.nodes[3].setmocktime(block_time) self.log.info( - f"Iteration {i}, generating 2500 transactions {self.restart_counts}") + f"Iteration {i}, generating 2500 transactions {self.restart_counts}" + ) # Generate a bunch of small-ish transactions self.generate_small_transactions(self.nodes[3], 2500, utxo_list) # Pick a random block between current tip, and starting tip current_height = self.nodes[3].getblockcount() random_height = random.randint(starting_tip_height, current_height) self.log.debug( - f"At height {current_height}, considering height {random_height}") + f"At height {current_height}, considering height {random_height}" + ) if random_height > starting_tip_height: # Randomly reorg from this point with some probability (1/4 for # tip, 1/5 for tip-1, ...) if random.random() < 1.0 / (current_height + 4 - random_height): - self.log.debug( - f"Invalidating block at height {random_height}") + self.log.debug(f"Invalidating block at height {random_height}") self.nodes[3].invalidateblock( - self.nodes[3].getblockhash(random_height)) + self.nodes[3].getblockhash(random_height) + ) # Now generate new blocks until we pass the old tip height self.log.debug("Mining longer tip") @@ -286,13 +292,15 @@ block_hashes.extend( self.generatetoaddress( self.nodes[3], - nblocks=min(10, current_height + 1 - - self.nodes[3].getblockcount()), + nblocks=min( + 10, current_height + 1 - self.nodes[3].getblockcount() + ), # new address to avoid mining a block that has just been # invalidated address=self.nodes[3].getnewaddress(), sync_fun=self.no_op, - )) + ) + ) self.log.debug(f"Syncing {len(block_hashes)} new blocks...") self.sync_node3blocks(block_hashes) utxo_list = self.nodes[3].listunspent() @@ -306,7 +314,8 @@ # Check the test coverage self.log.info( f"Restarted nodes: {self.restart_counts}; " - f"crashes on restart: {self.crashed_on_restart}") + f"crashes on restart: {self.crashed_on_restart}" + ) # If no nodes were restarted, we didn't test anything. assert self.restart_counts != [0, 0, 0] @@ -317,8 +326,7 @@ # Warn if any of the nodes escaped restart. 
for i in range(3): if self.restart_counts[i] == 0: - self.log.warning( - f"Node {i} never crashed during utxo flush!") + self.log.warning(f"Node {i} never crashed during utxo flush!") if __name__ == "__main__": diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py --- a/test/functional/feature_dersig.py +++ b/test/functional/feature_dersig.py @@ -28,8 +28,8 @@ scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: - if (len(newscript) == 0): - newscript.append(i[0:-1] + b'\0' + i[-1:]) + if len(newscript) == 0: + newscript.append(i[0:-1] + b"\0" + i[-1:]) else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) @@ -38,7 +38,7 @@ class BIP66Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.extra_args = [['-whitelist=noban@127.0.0.1']] + self.extra_args = [["-whitelist=noban@127.0.0.1"]] self.setup_clean_chain = True self.rpc_timeout = 240 @@ -49,40 +49,54 @@ peer = self.nodes[0].add_p2p_connection(P2PInterface()) self.log.info(f"Mining {DERSIG_HEIGHT - 1} blocks") - self.coinbase_txids = [self.nodes[0].getblock( - b)['tx'][0] for b in self.generate(self.nodes[0], DERSIG_HEIGHT - 1)] + self.coinbase_txids = [ + self.nodes[0].getblock(b)["tx"][0] + for b in self.generate(self.nodes[0], DERSIG_HEIGHT - 1) + ] self.nodeaddress = self.nodes[0].getnewaddress() self.log.info("Test that blocks must now be at least version 3") tip = self.nodes[0].getbestblockhash() - block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 - block = create_block( - int(tip, 16), create_coinbase(DERSIG_HEIGHT), block_time) + block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 + block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT), block_time) block.nVersion = 2 block.rehash() block.solve() - with self.nodes[0].assert_debug_log(expected_msgs=[f'{block.hash}, bad-version(0x00000002)']): + with self.nodes[0].assert_debug_log( + expected_msgs=[f"{block.hash}, bad-version(0x00000002)"] + ): peer.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) peer.sync_with_ping() self.log.info( - "Test that transactions with non-DER signatures cannot appear in a block") + "Test that transactions with non-DER signatures cannot appear in a block" + ) block.nVersion = 3 - spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1], - self.nodeaddress, amount=1000000) + spendtx = create_transaction( + self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1000000 + ) unDERify(spendtx) spendtx.rehash() # First we show that this tx is valid except for DERSIG by getting it # rejected from the mempool for exactly that reason. assert_equal( - [{'txid': spendtx.hash, 'allowed': False, - 'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)'}], + [ + { + "txid": spendtx.hash, + "allowed": False, + "reject-reason": ( + "mandatory-script-verify-flag-failed (Non-canonical DER" + " signature)" + ), + } + ], self.nodes[0].testmempoolaccept( - rawtxs=[spendtx.serialize().hex()], maxfeerate=0) + rawtxs=[spendtx.serialize().hex()], maxfeerate=0 + ), ) # Now we verify that a block with this transaction is also invalid. 
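        # Aside: why unDERify() above yields a still-spendable but
        # non-canonical signature. The first scriptSig push is
        # <DER signature><1-byte sighash type>; splicing a 0x00 byte in front
        # of the sighash byte grows the push without updating the DER length
        # byte, and BIP66's strict check requires that length byte to account
        # for everything up to the sighash byte. A simplified sketch of the
        # relevant rule (condensed from the IsValidSignatureEncoding length
        # checks; helper name is illustrative, not the full validation):
        def der_length_consistent(sig_push: bytes) -> bool:
            # sig_push = 0x30 <len> <body...> <sighash>; <len> must be len - 3
            return (
                len(sig_push) >= 9
                and sig_push[0] == 0x30
                and sig_push[1] == len(sig_push) - 3
            )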
@@ -91,15 +105,20 @@ block.rehash() block.solve() - with self.nodes[0].assert_debug_log(expected_msgs=[f'ConnectBlock {block.hash} failed, blk-bad-inputs']): + with self.nodes[0].assert_debug_log( + expected_msgs=[f"ConnectBlock {block.hash} failed, blk-bad-inputs"] + ): peer.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) peer.sync_with_ping() self.log.info( - "Test that a version 3 block with a DERSIG-compliant transaction is accepted") - block.vtx[1] = create_transaction(self.nodes[0], - self.coinbase_txids[1], self.nodeaddress, amount=1.0) + "Test that a version 3 block with a DERSIG-compliant transaction is" + " accepted" + ) + block.vtx[1] = create_transaction( + self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0 + ) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() @@ -108,5 +127,5 @@ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) -if __name__ == '__main__': +if __name__ == "__main__": BIP66Test().main() diff --git a/test/functional/feature_deterministic_chain_setup.py b/test/functional/feature_deterministic_chain_setup.py --- a/test/functional/feature_deterministic_chain_setup.py +++ b/test/functional/feature_deterministic_chain_setup.py @@ -21,16 +21,16 @@ else: bip34_coinbase_height = CScriptNum.encode(CScriptNum(height)) extra_nonce = CScriptNum.encode(CScriptNum(1)) - excessive_blocksize_sig = CScriptOp.encode_op_pushdata(b'/EB32.0/') + excessive_blocksize_sig = CScriptOp.encode_op_pushdata(b"/EB32.0/") return bip34_coinbase_height + extra_nonce + excessive_blocksize_sig def get_coinbase(height: int, pubkey: bytes) -> CTransaction: coinbase = CTransaction() coinbase.nVersion = 2 - coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), - get_coinbase_scriptsig(height), - 0xffffffff)) + coinbase.vin.append( + CTxIn(COutPoint(0, 0xFFFFFFFF), get_coinbase_scriptsig(height), 0xFFFFFFFF) + ) coinbaseoutput = CTxOut() coinbaseoutput.nValue = 50 * COIN regtest_halvings = int(height / 150) @@ -41,14 +41,15 @@ return coinbase -def get_empty_block(height: int, base_block_hash: str, block_time: int, - coinbase_pubkey: bytes) -> CBlock: +def get_empty_block( + height: int, base_block_hash: str, block_time: int, coinbase_pubkey: bytes +) -> CBlock: block = CBlock() block.nVersion = 0x20000000 block.nTime = block_time block.hashPrevBlock = int(base_block_hash, 16) # difficulty retargeting is disabled in REGTEST chainparams - block.nBits = 0x207fffff + block.nBits = 0x207FFFFF block.vtx.append(get_coinbase(height, coinbase_pubkey)) block.hashMerkleRoot = block.calc_merkle_root() block.solve() @@ -77,32 +78,35 @@ nonlocal chain_height nonlocal mock_time for _ in range(num_blocks): - block = get_empty_block(chain_height, tip, mock_time, - coinbase_pubkey) + block = get_empty_block(chain_height, tip, mock_time, coinbase_pubkey) assert node.submitblock(block.serialize().hex()) is None tip = node.getbestblockhash() chain_height += 1 mock_time += 1 - self.log.info( - "Reproduce the assertion in the TestChain100Setup constructor.") + self.log.info("Reproduce the assertion in the TestChain100Setup constructor.") mine_blocks(100) - assert_equal(tip, - "7487ae41496da318b430ad04cc5039507a9365bdb26275d79b3fc148c6eea1e9") + assert_equal( + tip, "7487ae41496da318b430ad04cc5039507a9365bdb26275d79b3fc148c6eea1e9" + ) self.log.info("Check m_assumeutxo_data at height 110.") mine_blocks(10) assert_equal(node.getblockchaininfo()["blocks"], 110) - assert_equal(node.gettxoutsetinfo()["hash_serialized"], - 
"ff755939f6fd81bf966e2f347f5d3660d6239334050eb557a6f005d7d8184ea9") + assert_equal( + node.gettxoutsetinfo()["hash_serialized"], + "ff755939f6fd81bf966e2f347f5d3660d6239334050eb557a6f005d7d8184ea9", + ) self.log.info("Check m_assumeutxo_data at height 210.") mine_blocks(100) assert_equal(node.getblockchaininfo()["blocks"], 210) - assert_equal(node.gettxoutsetinfo()["hash_serialized"], - "d6089fa8d2100926326cacdd452231e30bb4e64f07aa5bfec96e055ac2a9a87a") + assert_equal( + node.gettxoutsetinfo()["hash_serialized"], + "d6089fa8d2100926326cacdd452231e30bb4e64f07aa5bfec96e055ac2a9a87a", + ) -if __name__ == '__main__': +if __name__ == "__main__": DeterministicChainSetupTest().main() diff --git a/test/functional/feature_dirsymlinks.py b/test/functional/feature_dirsymlinks.py --- a/test/functional/feature_dirsymlinks.py +++ b/test/functional/feature_dirsymlinks.py @@ -19,7 +19,7 @@ class SymlinkTest(BitcoinTestFramework): def skip_test_if_missing_module(self): - if sys.platform == 'win32': + if sys.platform == "win32": raise SkipTest("Symlinks test skipped on Windows") def set_test_params(self): @@ -30,15 +30,15 @@ rename_and_link( from_name=os.path.join(self.nodes[0].datadir, self.chain, "blocks"), - to_name=os.path.join(self.nodes[0].datadir, self.chain, "newblocks") + to_name=os.path.join(self.nodes[0].datadir, self.chain, "newblocks"), ) rename_and_link( from_name=os.path.join(self.nodes[0].datadir, self.chain, "chainstate"), - to_name=os.path.join(self.nodes[0].datadir, self.chain, "newchainstate") + to_name=os.path.join(self.nodes[0].datadir, self.chain, "newchainstate"), ) self.start_node(0) -if __name__ == '__main__': +if __name__ == "__main__": SymlinkTest().main() diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py --- a/test/functional/feature_filelock.py +++ b/test/functional/feature_filelock.py @@ -24,29 +24,36 @@ self.log.info(f"Using datadir {datadir}") self.log.info( - "Check that we can't start a second bitcoind instance using the same datadir") + "Check that we can't start a second bitcoind instance using the same" + " datadir" + ) expected_msg = ( f"Error: Cannot obtain a lock on data directory {datadir}. " f"{self.config['environment']['PACKAGE_NAME']} is probably already running." 
) self.nodes[1].assert_start_raises_init_error( - extra_args=[f'-datadir={self.nodes[0].datadir}', '-noserver'], - expected_msg=expected_msg) + extra_args=[f"-datadir={self.nodes[0].datadir}", "-noserver"], + expected_msg=expected_msg, + ) if self.is_wallet_compiled(): self.nodes[0].createwallet(self.default_wallet_name) - wallet_dir = os.path.join(datadir, 'wallets') + wallet_dir = os.path.join(datadir, "wallets") self.log.info( - "Check that we can't start a second bitcoind instance using the same wallet") + "Check that we can't start a second bitcoind instance using the same" + " wallet" + ) expected_msg = "Error: Error initializing wallet database environment" self.nodes[1].assert_start_raises_init_error( extra_args=[ - f'-walletdir={wallet_dir}', - f'-wallet={self.default_wallet_name}', - '-noserver'], + f"-walletdir={wallet_dir}", + f"-wallet={self.default_wallet_name}", + "-noserver", + ], expected_msg=expected_msg, - match=ErrorMatch.PARTIAL_REGEX) + match=ErrorMatch.PARTIAL_REGEX, + ) -if __name__ == '__main__': +if __name__ == "__main__": FilelockTest().main() diff --git a/test/functional/feature_help.py b/test/functional/feature_help.py --- a/test/functional/feature_help.py +++ b/test/functional/feature_help.py @@ -37,28 +37,27 @@ def run_test(self): self.log.info("Start bitcoin with -h for help text") - self.nodes[0].start(extra_args=['-h']) + self.nodes[0].start(extra_args=["-h"]) # Node should exit immediately and output help to stdout. output, _ = self.get_node_output(ret_code_expected=0) - assert b'Options' in output + assert b"Options" in output self.log.info(f"Help text received: {output[0:60]} (...)") self.log.info("Start bitcoin with -version for version information") - self.nodes[0].start(extra_args=['-version']) + self.nodes[0].start(extra_args=["-version"]) # Node should exit immediately and output version to stdout. 
output, _ = self.get_node_output(ret_code_expected=0) - assert b'version' in output + assert b"version" in output self.log.info(f"Version text received: {output[0:60]} (...)") # Test that arguments not in the help results in an error - self.log.info( - "Start bitcoind with -fakearg to make sure it does not start") - self.nodes[0].start(extra_args=['-fakearg']) + self.log.info("Start bitcoind with -fakearg to make sure it does not start") + self.nodes[0].start(extra_args=["-fakearg"]) # Node should exit immediately and output an error to stderr _, output = self.get_node_output(ret_code_expected=1) - assert b'Error parsing command line arguments' in output + assert b"Error parsing command line arguments" in output self.log.info(f"Error message received: {output[0:60]} (...)") -if __name__ == '__main__': +if __name__ == "__main__": HelpTest().main() diff --git a/test/functional/feature_includeconf.py b/test/functional/feature_includeconf.py --- a/test/functional/feature_includeconf.py +++ b/test/functional/feature_includeconf.py @@ -27,36 +27,66 @@ super().setup_chain() # Create additional config files # - tmpdir/node0/relative.conf - with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f: + with open( + os.path.join(self.options.tmpdir, "node0", "relative.conf"), + "w", + encoding="utf8", + ) as f: f.write("uacomment=relative\n") # - tmpdir/node0/relative2.conf - with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f: + with open( + os.path.join(self.options.tmpdir, "node0", "relative2.conf"), + "w", + encoding="utf8", + ) as f: f.write("uacomment=relative2\n") - with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f: + with open( + os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), + "a", + encoding="utf8", + ) as f: f.write("uacomment=main\nincludeconf=relative.conf\n") def run_test(self): self.log.info( - "-includeconf works from config file. subversion should end with 'main; relative)/'") + "-includeconf works from config file. subversion should end with 'main;" + " relative)/'" + ) subversion = self.nodes[0].getnetworkinfo()["subversion"] assert subversion.endswith("main; relative)/") self.log.info("-includeconf cannot be used as command-line arg") self.stop_node(0) - self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], - expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf") + self.nodes[0].assert_start_raises_init_error( + extra_args=["-includeconf=relative2.conf"], + expected_msg=( + "Error: Error parsing command line arguments: -includeconf cannot be" + " used from commandline; -includeconf=relative2.conf" + ), + ) self.log.info( - "-includeconf cannot be used recursively. subversion should end with 'main; relative)/'") - with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f: + "-includeconf cannot be used recursively. 
subversion should end with 'main;" + " relative)/'" + ) + with open( + os.path.join(self.options.tmpdir, "node0", "relative.conf"), + "a", + encoding="utf8", + ) as f: f.write("includeconf=relative2.conf\n") self.start_node(0) subversion = self.nodes[0].getnetworkinfo()["subversion"] assert subversion.endswith("main; relative)/") self.stop_node( - 0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf") + 0, + expected_stderr=( + "warning: -includeconf cannot be used from included files; ignoring" + " -includeconf=relative2.conf" + ), + ) self.log.info("-includeconf cannot contain invalid arg") @@ -70,15 +100,29 @@ self.log.info("-includeconf cannot be invalid path") os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf")) self.nodes[0].assert_start_raises_init_error( - expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf") + expected_msg=( + "Error: Error reading configuration file: Failed to include" + " configuration file relative.conf" + ) + ) self.log.info( - "multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'") - with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f: + "multiple -includeconf args can be used from the base config file." + " subversion should end with 'main; relative; relative2)/'" + ) + with open( + os.path.join(self.options.tmpdir, "node0", "relative.conf"), + "w", + encoding="utf8", + ) as f: # Restore initial file contents f.write("uacomment=relative\n") - with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f: + with open( + os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), + "a", + encoding="utf8", + ) as f: f.write("includeconf=relative2.conf\n") self.start_node(0) @@ -87,5 +131,5 @@ assert subversion.endswith("main; relative; relative2)/") -if __name__ == '__main__': +if __name__ == "__main__": IncludeConfTest().main() diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py --- a/test/functional/feature_init.py +++ b/test/functional/feature_init.py @@ -32,7 +32,7 @@ # and other approaches (like below) don't work: # # os.kill(node.process.pid, signal.CTRL_C_EVENT) - if os.name == 'nt': + if os.name == "nt": raise SkipTest("can't SIGTERM on Windows") self.stop_node(0) @@ -49,53 +49,52 @@ assert_equal(200, node.getblockcount()) lines_to_terminate_after = [ - b'Validating signatures for all blocks', - b'scheduler thread start', - b'Starting HTTP server', - b'Loading P2P addresses', - b'Loading banlist', - b'Loading block index', - b'Switching active chainstate', - b'Checking all blk files are present', - b'Loaded best chain:', - b'init message: Verifying blocks', - b'init message: Starting network threads', - b'net thread start', - b'addcon thread start', - b'loadblk thread start', - b'txindex thread start', - b'block filter index thread start', - b'coinstatsindex thread start', - b'msghand thread start', - b'net thread start', - b'addcon thread start', + b"Validating signatures for all blocks", + b"scheduler thread start", + b"Starting HTTP server", + b"Loading P2P addresses", + b"Loading banlist", + b"Loading block index", + b"Switching active chainstate", + b"Checking all blk files are present", + b"Loaded best chain:", + b"init message: Verifying blocks", + b"init message: Starting network threads", + b"net thread start", + b"addcon thread start", + 
b"loadblk thread start", + b"txindex thread start", + b"block filter index thread start", + b"coinstatsindex thread start", + b"msghand thread start", + b"net thread start", + b"addcon thread start", ] if self.is_wallet_compiled(): - lines_to_terminate_after.append(b'Verifying wallet') + lines_to_terminate_after.append(b"Verifying wallet") for terminate_line in lines_to_terminate_after: - self.log.info( - f"Starting node and will exit after line {terminate_line}") + self.log.info(f"Starting node and will exit after line {terminate_line}") with node.wait_for_debug_log([terminate_line]): node.start( extra_args=[ - '-txindex=1', - '-blockfilterindex=1', - '-coinstatsindex=1', - ]) + "-txindex=1", + "-blockfilterindex=1", + "-coinstatsindex=1", + ] + ) self.log.debug("Terminating node after terminate line was found") sigterm_node() check_clean_start() self.stop_node(0) - self.log.info( - "Test startup errors after removing certain essential files") + self.log.info("Test startup errors after removing certain essential files") files_to_disturb = { - 'blocks/index/*.ldb': 'Error opening block database.', - 'chainstate/*.ldb': 'Error opening block database.', - 'blocks/blk*.dat': 'Error loading block database.', + "blocks/index/*.ldb": "Error opening block database.", + "chainstate/*.ldb": "Error opening block database.", + "blocks/blk*.dat": "Error loading block database.", } for file_patt, err_fragment in files_to_disturb.items(): @@ -119,9 +118,9 @@ node.assert_start_raises_init_error( extra_args=[ - '-txindex=1', - '-blockfilterindex=1', - '-coinstatsindex=1', + "-txindex=1", + "-blockfilterindex=1", + "-coinstatsindex=1", ], expected_msg=err_fragment, match=ErrorMatch.PARTIAL_REGEX, @@ -129,13 +128,12 @@ for target_file in target_files: bak_path = f"{target_file}.bak" - self.log.debug( - f"Restoring file from {bak_path} and restarting") + self.log.debug(f"Restoring file from {bak_path} and restarting") Path(bak_path).rename(target_file) check_clean_start() self.stop_node(0) -if __name__ == '__main__': +if __name__ == "__main__": InitStressTest().main() diff --git a/test/functional/feature_loadblock.py b/test/functional/feature_loadblock.py --- a/test/functional/feature_loadblock.py +++ b/test/functional/feature_loadblock.py @@ -37,10 +37,9 @@ bootstrap_file = os.path.join(self.options.tmpdir, "bootstrap.dat") genesis_block = self.nodes[0].getblockhash(0) blocks_dir = os.path.join(data_dir, self.chain, "blocks") - hash_list = tempfile.NamedTemporaryFile(dir=data_dir, - mode='w', - delete=False, - encoding="utf-8") + hash_list = tempfile.NamedTemporaryFile( + dir=data_dir, mode="w", delete=False, encoding="utf-8" + ) self.log.info("Create linearization config file") with open(cfg_file, "a", encoding="utf-8") as cfg: @@ -60,27 +59,25 @@ linearize_dir = os.path.join(base_dir, "contrib", "linearize") self.log.info("Run linearization of block hashes") - linearize_hashes_file = os.path.join( - linearize_dir, "linearize-hashes.py") - subprocess.run([sys.executable, linearize_hashes_file, cfg_file], - stdout=hash_list, - check=True) + linearize_hashes_file = os.path.join(linearize_dir, "linearize-hashes.py") + subprocess.run( + [sys.executable, linearize_hashes_file, cfg_file], + stdout=hash_list, + check=True, + ) self.log.info("Run linearization of block data") linearize_data_file = os.path.join(linearize_dir, "linearize-data.py") - subprocess.run([sys.executable, linearize_data_file, cfg_file], - check=True) + subprocess.run([sys.executable, linearize_data_file, cfg_file], check=True) 
self.log.info("Restart second, unsynced node with bootstrap file") self.restart_node(1, extra_args=[f"-loadblock={bootstrap_file}"]) # start_node is blocking on all block files being imported assert_equal(self.nodes[1].getblockcount(), 100) - assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100) - assert_equal( - self.nodes[0].getbestblockhash(), - self.nodes[1].getbestblockhash()) + assert_equal(self.nodes[1].getblockchaininfo()["blocks"], 100) + assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) -if __name__ == '__main__': +if __name__ == "__main__": LoadblockTest().main() diff --git a/test/functional/feature_logging.py b/test/functional/feature_logging.py --- a/test/functional/feature_logging.py +++ b/test/functional/feature_logging.py @@ -38,7 +38,8 @@ self.stop_node(0) exp_stderr = r"Error: Could not open debug log file \S+$" self.nodes[0].assert_start_raises_init_error( - [f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX) + [f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX + ) assert not os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (relative) works after path exists @@ -52,7 +53,8 @@ invdir = os.path.join(self.options.tmpdir, "foo") invalidname = os.path.join(invdir, "foo.log") self.nodes[0].assert_start_raises_init_error( - [f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX) + [f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX + ) assert not os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (absolute) works after path exists @@ -72,5 +74,5 @@ self.restart_node(0, [f"-debuglogfile={os.devnull}"]) -if __name__ == '__main__': +if __name__ == "__main__": LoggingTest().main() diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -40,10 +40,12 @@ self.setup_clean_chain = True self.num_nodes = 1 # Start a node with maxuploadtarget of 200 MB (/24h) - self.extra_args = [[ - "-maxuploadtarget=200", - "-acceptnonstdtxn=1", - ]] + self.extra_args = [ + [ + "-maxuploadtarget=200", + "-acceptnonstdtxn=1", + ] + ] self.supports_cli = False # Cache for utxos, as the listunspent may take a long time later in the @@ -76,7 +78,7 @@ # Store the hash; we'll request this later big_old_block = self.nodes[0].getbestblockhash() - old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] + old_block_size = self.nodes[0].getblock(big_old_block, True)["size"] big_old_block = int(big_old_block, 16) # Advance to two days ago @@ -113,8 +115,7 @@ p2p_conns[0].send_message(getdata_request) p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) - self.log.info( - "Peer 0 disconnected after downloading old block too many times") + self.log.info("Peer 0 disconnected after downloading old block too many times") # Requesting the current block on p2p_conns[1] should succeed indefinitely, # even when over the max upload target. 
@@ -148,10 +149,17 @@ self.nodes[0].disconnect_p2ps() - self.log.info("Restarting node 0 with download permission" - " and 1MB maxuploadtarget") - self.restart_node(0, ["-whitelist=download@127.0.0.1", - "-maxuploadtarget=1", "-blockmaxsize=999000"]) + self.log.info( + "Restarting node 0 with download permission and 1MB maxuploadtarget" + ) + self.restart_node( + 0, + [ + "-whitelist=download@127.0.0.1", + "-maxuploadtarget=1", + "-blockmaxsize=999000", + ], + ) # Reconnect to self.nodes[0] peer = self.nodes[0].add_p2p_connection(TestP2PConn()) @@ -160,19 +168,20 @@ getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)] for i in range(20): peer.send_and_ping(getdata_request) - assert_equal( - peer.block_receive_map[big_new_block], i + 1) + assert_equal(peer.block_receive_map[big_new_block], i + 1) getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)] peer.send_and_ping(getdata_request) self.log.info( - "Peer still connected after trying to download old block (download permission)") + "Peer still connected after trying to download old block (download" + " permission)" + ) peer_info = self.nodes[0].getpeerinfo() # node is still connected assert_equal(len(peer_info), 1) - assert_equal(peer_info[0]['permissions'], ['download']) + assert_equal(peer_info[0]["permissions"], ["download"]) -if __name__ == '__main__': +if __name__ == "__main__": MaxUploadTest().main() diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py --- a/test/functional/feature_minchainwork.py +++ b/test/functional/feature_minchainwork.py @@ -28,8 +28,7 @@ def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 - self.extra_args = [[], ["-minimumchainwork=0x65"], - ["-minimumchainwork=0x65"]] + self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] self.node_min_work = [0, 101, 101] def setup_network(self): @@ -47,21 +46,23 @@ # minchainwork is exceeded starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work self.log.info( - f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})") + f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})" + ) starting_blockcount = self.nodes[2].getblockcount() num_blocks_to_generate = int( - (self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) + (self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK + ) self.log.info(f"Generating {num_blocks_to_generate} blocks on node0") hashes = self.generate( - self.nodes[0], - num_blocks_to_generate, - sync_fun=self.no_op) + self.nodes[0], num_blocks_to_generate, sync_fun=self.no_op + ) self.log.info( "Node0 current chain work: " - f"{self.nodes[0].getblockheader(hashes[-1])['chainwork']}") + f"{self.nodes[0].getblockheader(hashes[-1])['chainwork']}" + ) # Sleep a few seconds and verify that node2 didn't get any new blocks # or headers. We sleep, rather than sync_blocks(node0, node1) because @@ -74,10 +75,9 @@ # Node2 shouldn't have any new headers yet, because node1 should not # have relayed anything. 
assert_equal(len(self.nodes[2].getchaintips()), 1) - assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) + assert_equal(self.nodes[2].getchaintips()[0]["height"], 0) - assert self.nodes[1].getbestblockhash( - ) != self.nodes[0].getbestblockhash() + assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash() assert_equal(self.nodes[2].getblockcount(), starting_blockcount) self.log.info("Generating one more block") @@ -95,5 +95,5 @@ self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}") -if __name__ == '__main__': +if __name__ == "__main__": MinimumChainWorkTest().main() diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -14,13 +14,13 @@ # Linux allow all characters other than \x00 # Windows disallow control characters (0-31) and /\?%:|"<> -FILE_CHAR_START = 32 if os.name == 'nt' else 1 +FILE_CHAR_START = 32 if os.name == "nt" else 1 FILE_CHAR_END = 128 -FILE_CHAR_BLACKLIST = '/\\?%*:|"<>' if os.name == 'nt' else '/' +FILE_CHAR_BLACKLIST = '/\\?%*:|"<>' if os.name == "nt" else "/" def notify_outputname(walletname, txid): - return txid if os.name == 'nt' else f'{walletname}_{txid}' + return txid if os.name == "nt" else f"{walletname}_{txid}" class NotificationsTest(BitcoinTestFramework): @@ -29,41 +29,51 @@ self.setup_clean_chain = True def setup_network(self): - self.wallet = ''.join( - chr(i) for i in range( - FILE_CHAR_START, - FILE_CHAR_END) if chr(i) not in FILE_CHAR_BLACKLIST) + self.wallet = "".join( + chr(i) + for i in range(FILE_CHAR_START, FILE_CHAR_END) + if chr(i) not in FILE_CHAR_BLACKLIST + ) self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify") self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify") - self.walletnotify_dir = os.path.join( - self.options.tmpdir, "walletnotify") + self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify") os.mkdir(self.alertnotify_dir) os.mkdir(self.blocknotify_dir) os.mkdir(self.walletnotify_dir) # -alertnotify and -blocknotify on node0, walletnotify on node1 self.extra_args = [ - [f"-alertnotify=echo > {os.path.join(self.alertnotify_dir, '%s')}", - f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}"], - ["-rescan", - f"-walletnotify=echo > {os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))}"]] + [ + f"-alertnotify=echo > {os.path.join(self.alertnotify_dir, '%s')}", + f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}", + ], + [ + "-rescan", + ( + "-walletnotify=echo >" + f" {os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))}" + ), + ], + ] self.wallet_names = [self.default_wallet_name, self.wallet] super().setup_network() def run_test(self): self.log.info("test -blocknotify") block_count = 10 - blocks = self.generatetoaddress(self.nodes[1], - block_count, - self.nodes[1].getnewaddress() if self.is_wallet_compiled() - else ADDRESS_ECREG_UNSPENDABLE - ) + blocks = self.generatetoaddress( + self.nodes[1], + block_count, + self.nodes[1].getnewaddress() + if self.is_wallet_compiled() + else ADDRESS_ECREG_UNSPENDABLE, + ) # wait at most 10 seconds for expected number of files before reading # the content self.wait_until( - lambda: len(os.listdir(self.blocknotify_dir)) == block_count, - timeout=10) + lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10 + ) # directory content should equal the generated blocks hashes 
assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir))) @@ -74,15 +84,15 @@ # reading the content self.wait_until( lambda: len(os.listdir(self.walletnotify_dir)) == block_count, - timeout=10) + timeout=10, + ) # directory content should equal the generated transaction hashes - txids_rpc = [notify_outputname(self.wallet, t['txid']) - for t in self.nodes[1].listtransactions("*", block_count)] - assert_equal( - sorted(txids_rpc), sorted( - os.listdir( - self.walletnotify_dir))) + txids_rpc = [ + notify_outputname(self.wallet, t["txid"]) + for t in self.nodes[1].listtransactions("*", block_count) + ] + assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir))) self.stop_node(1) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) @@ -94,15 +104,15 @@ self.wait_until( lambda: len(os.listdir(self.walletnotify_dir)) == block_count, - timeout=10) + timeout=10, + ) # directory content should equal the generated transaction hashes - txids_rpc = [notify_outputname(self.wallet, t['txid']) - for t in self.nodes[1].listtransactions("*", block_count)] - assert_equal( - sorted(txids_rpc), sorted( - os.listdir( - self.walletnotify_dir))) + txids_rpc = [ + notify_outputname(self.wallet, t["txid"]) + for t in self.nodes[1].listtransactions("*", block_count) + ] + assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir))) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) @@ -113,16 +123,18 @@ self.nodes[0].sethdseed( seed=self.nodes[1].dumpprivkey( keyhash_to_p2pkh( - bytes.fromhex( - self.nodes[1].getwalletinfo()['hdseedid'])[::-1]))) + bytes.fromhex(self.nodes[1].getwalletinfo()["hdseedid"])[::-1] + ) + ) + ) self.nodes[0].rescanblockchain() - self.generatetoaddress( - self.nodes[0], 100, ADDRESS_ECREG_UNSPENDABLE) + self.generatetoaddress(self.nodes[0], 100, ADDRESS_ECREG_UNSPENDABLE) # Generate transaction on node 0, sync mempools, and check for # notification on node 1. tx1 = self.nodes[0].sendtoaddress( - address=ADDRESS_ECREG_UNSPENDABLE, amount=100) + address=ADDRESS_ECREG_UNSPENDABLE, amount=100 + ) assert_equal(tx1 in self.nodes[0].getrawmempool(), True) self.sync_mempools() self.expect_wallet_notify([tx1]) @@ -141,26 +153,26 @@ balance = self.nodes[0].getbalance() self.disconnect_nodes(0, 1) tx2_node0 = self.nodes[0].sendtoaddress( - address=ADDRESS_ECREG_UNSPENDABLE, amount=balance - 20) + address=ADDRESS_ECREG_UNSPENDABLE, amount=balance - 20 + ) tx2_node1 = self.nodes[1].sendtoaddress( - address=ADDRESS_ECREG_UNSPENDABLE, amount=balance - 21) + address=ADDRESS_ECREG_UNSPENDABLE, amount=balance - 21 + ) assert tx2_node0 != tx2_node1 self.expect_wallet_notify([tx2_node1]) # So far tx2_node1 has no conflicting tx - assert not self.nodes[1].gettransaction( - tx2_node1)['walletconflicts'] + assert not self.nodes[1].gettransaction(tx2_node1)["walletconflicts"] # Mine a block on node0, reconnect the nodes, check that tx2_node1 # has a conflicting tx after syncing with node0. 
self.generatetoaddress( - self.nodes[0], - 1, - ADDRESS_ECREG_UNSPENDABLE, - sync_fun=self.no_op) + self.nodes[0], 1, ADDRESS_ECREG_UNSPENDABLE, sync_fun=self.no_op + ) self.connect_nodes(0, 1) self.sync_blocks() - assert tx2_node0 in self.nodes[1].gettransaction(tx2_node1)[ - 'walletconflicts'] + assert ( + tx2_node0 in self.nodes[1].gettransaction(tx2_node1)["walletconflicts"] + ) # node1's wallet will notify of the new confirmed transaction tx2_0 # and about the conflicted transaction tx2_1. @@ -177,28 +189,29 @@ self.nodes[0].invalidateblock(invalid_block) # Give bitcoind 10 seconds to write the alert notification - self.wait_until(lambda: len(os.listdir(self.alertnotify_dir)), - timeout=10) + self.wait_until(lambda: len(os.listdir(self.alertnotify_dir)), timeout=10) # The notification command is unable to properly handle the spaces on # windows. Skip the content check in this case. - if os.name != 'nt': - assert FORK_WARNING_MESSAGE.format( - fork_block) in os.listdir(self.alertnotify_dir) + if os.name != "nt": + assert FORK_WARNING_MESSAGE.format(fork_block) in os.listdir( + self.alertnotify_dir + ) for notify_file in os.listdir(self.alertnotify_dir): os.remove(os.path.join(self.alertnotify_dir, notify_file)) def expect_wallet_notify(self, tx_ids): self.wait_until( - lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), - timeout=10) + lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), timeout=10 + ) assert_equal( sorted(notify_outputname(self.wallet, tx_id) for tx_id in tx_ids), - sorted(os.listdir(self.walletnotify_dir))) + sorted(os.listdir(self.walletnotify_dir)), + ) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) -if __name__ == '__main__': +if __name__ == "__main__": NotificationsTest().main() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -64,19 +64,19 @@ # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() - self.conf1.addr = ('127.0.0.1', p2p_port(self.num_nodes)) + self.conf1.addr = ("127.0.0.1", p2p_port(self.num_nodes)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() - self.conf2.addr = ('127.0.0.1', p2p_port(self.num_nodes + 1)) + self.conf2.addr = ("127.0.0.1", p2p_port(self.num_nodes + 1)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ... one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 - self.conf3.addr = ('::1', p2p_port(self.num_nodes + 2)) + self.conf3.addr = ("::1", p2p_port(self.num_nodes + 2)) self.conf3.unauth = True self.conf3.auth = True else: @@ -91,7 +91,7 @@ self.serv3.start() # We will not try to connect to this. - self.i2p_sam = ('127.0.0.1', 7656) + self.i2p_sam = ("127.0.0.1", 7656) # Note: proxies are not used to connect to local nodes. 
# This is because the proxy to use is based on CService.GetNetwork(), @@ -99,18 +99,25 @@ ip1, port1 = self.conf1.addr ip2, port2 = self.conf2.addr args = [ - ['-listen', f'-proxy={ip1}:{port1}', '-proxyrandomize=1'], - ['-listen', f'-proxy={ip1}:{port1}', f'-onion={ip2}:{port2}', - f'-i2psam={self.i2p_sam[0]}:{self.i2p_sam[1]}', '-i2pacceptincoming=0', - '-proxyrandomize=0'], - ['-listen', f'-proxy={ip2}:{port2}', '-proxyrandomize=1'], - [] + ["-listen", f"-proxy={ip1}:{port1}", "-proxyrandomize=1"], + [ + "-listen", + f"-proxy={ip1}:{port1}", + f"-onion={ip2}:{port2}", + f"-i2psam={self.i2p_sam[0]}:{self.i2p_sam[1]}", + "-i2pacceptincoming=0", + "-proxyrandomize=0", + ], + ["-listen", f"-proxy={ip2}:{port2}", "-proxyrandomize=1"], + [], ] if self.have_ipv6: args[3] = [ - '-listen', - f'-proxy=[{self.conf3.addr[0]}]:{self.conf3.addr[1]}', - '-proxyrandomize=0', '-noonion'] + "-listen", + f"-proxy=[{self.conf3.addr[0]}]:{self.conf3.addr[1]}", + "-proxyrandomize=0", + "-noonion", + ] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() @@ -123,7 +130,8 @@ rv = [] addr = "15.61.23.23:1234" self.log.debug( - f"Test: outgoing IPv4 connection through node for address {addr}") + f"Test: outgoing IPv4 connection through node for address {addr}" + ) node.addnode(addr, "onetry") cmd = proxies[0].queue.get() assert isinstance(cmd, Socks5Command) @@ -141,7 +149,8 @@ if self.have_ipv6: addr = "[1233:3432:2434:2343:3234:2345:6546:4534]:5443" self.log.debug( - f"Test: outgoing IPv6 connection through node for address {addr}") + f"Test: outgoing IPv6 connection through node for address {addr}" + ) node.addnode(addr, "onetry") cmd = proxies[1].queue.get() assert isinstance(cmd, Socks5Command) @@ -159,7 +168,8 @@ if test_onion: addr = "bitcoinostk4e4re.onion:8333" self.log.debug( - f"Test: outgoing onion connection through node for address {addr}") + f"Test: outgoing onion connection through node for address {addr}" + ) node.addnode(addr, "onetry") cmd = proxies[2].queue.get() assert isinstance(cmd, Socks5Command) @@ -174,7 +184,8 @@ addr = "node.noumenon:8333" self.log.debug( - f"Test: outgoing DNS name connection through node for address {addr}") + f"Test: outgoing DNS name connection through node for address {addr}" + ) node.addnode(addr, "onetry") cmd = proxies[3].queue.get() assert isinstance(cmd, Socks5Command) @@ -192,15 +203,18 @@ def run_test(self): # basic -proxy self.node_test( - self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) + self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False + ) # -proxy plus -onion self.node_test( - self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) + self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False + ) # -proxy plus -onion, -proxyrandomize rv = self.node_test( - self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) + self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True + ) # Check that credentials as used for -proxyrandomize connections are # unique credentials = {(x.username, x.password) for x in rv} @@ -209,12 +223,16 @@ if self.have_ipv6: # proxy on IPv6 localhost self.node_test( - self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) + self.nodes[3], + [self.serv3, self.serv3, self.serv3, self.serv3], + False, + False, + ) def networks_dict(d): r = {} - for x in d['networks']: - r[x['name']] = x + for x in d["networks"]: + r[x["name"]] = x return r self.log.info("Test RPC getnetworkinfo") @@ -224,59 +242,55 @@ 
         ip2, port2 = self.conf2.addr
         n0 = networks_dict(self.nodes[0].getnetworkinfo())
         assert_equal(NETWORKS, n0.keys())
         for net in NETWORKS:
             if net == NET_I2P:
-                expected_proxy = ''
+                expected_proxy = ""
                 expected_randomize = False
             else:
-                expected_proxy = f'{ip1}:{port1}'
+                expected_proxy = f"{ip1}:{port1}"
                 expected_randomize = True
-            assert_equal(n0[net]['proxy'], expected_proxy)
-            assert_equal(
-                n0[net]['proxy_randomize_credentials'],
-                expected_randomize)
-        assert_equal(n0['onion']['reachable'], True)
-        assert_equal(n0['i2p']['reachable'], False)
+            assert_equal(n0[net]["proxy"], expected_proxy)
+            assert_equal(n0[net]["proxy_randomize_credentials"], expected_randomize)
+        assert_equal(n0["onion"]["reachable"], True)
+        assert_equal(n0["i2p"]["reachable"], False)

         n1 = networks_dict(self.nodes[1].getnetworkinfo())
         assert_equal(NETWORKS, n1.keys())
-        for net in ['ipv4', 'ipv6']:
-            assert_equal(n1[net]['proxy'], f'{ip1}:{port1}')
-            assert_equal(n1[net]['proxy_randomize_credentials'], False)
-        assert_equal(n1['onion']['proxy'], f'{ip2}:{port2}')
-        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
-        assert_equal(n1['onion']['reachable'], True)
-        assert_equal(n1['i2p']['proxy'], f'{self.i2p_sam[0]}:{self.i2p_sam[1]}')
-        assert_equal(n1['i2p']['proxy_randomize_credentials'], False)
-        assert_equal(n1['i2p']['reachable'], True)
+        for net in ["ipv4", "ipv6"]:
+            assert_equal(n1[net]["proxy"], f"{ip1}:{port1}")
+            assert_equal(n1[net]["proxy_randomize_credentials"], False)
+        assert_equal(n1["onion"]["proxy"], f"{ip2}:{port2}")
+        assert_equal(n1["onion"]["proxy_randomize_credentials"], False)
+        assert_equal(n1["onion"]["reachable"], True)
+        assert_equal(n1["i2p"]["proxy"], f"{self.i2p_sam[0]}:{self.i2p_sam[1]}")
+        assert_equal(n1["i2p"]["proxy_randomize_credentials"], False)
+        assert_equal(n1["i2p"]["reachable"], True)

         n2 = networks_dict(self.nodes[2].getnetworkinfo())
         assert_equal(NETWORKS, n2.keys())
         for net in NETWORKS:
             if net == NET_I2P:
-                expected_proxy = ''
+                expected_proxy = ""
                 expected_randomize = False
             else:
-                expected_proxy = f'{ip2}:{port2}'
+                expected_proxy = f"{ip2}:{port2}"
                 expected_randomize = True
-            assert_equal(n2[net]['proxy'], expected_proxy)
-            assert_equal(
-                n2[net]['proxy_randomize_credentials'],
-                expected_randomize)
-        assert_equal(n2['onion']['reachable'], True)
-        assert_equal(n2['i2p']['reachable'], False)
+            assert_equal(n2[net]["proxy"], expected_proxy)
+            assert_equal(n2[net]["proxy_randomize_credentials"], expected_randomize)
+        assert_equal(n2["onion"]["reachable"], True)
+        assert_equal(n2["i2p"]["reachable"], False)

         if self.have_ipv6:
             n3 = networks_dict(self.nodes[3].getnetworkinfo())
             assert_equal(NETWORKS, n3.keys())
             for net in NETWORKS:
                 if net == NET_I2P:
-                    expected_proxy = ''
+                    expected_proxy = ""
                 else:
-                    expected_proxy = f'[{self.conf3.addr[0]}]:{self.conf3.addr[1]}'
-                assert_equal(n3[net]['proxy'], expected_proxy)
-                assert_equal(n3[net]['proxy_randomize_credentials'], False)
-            assert_equal(n3['onion']['reachable'], False)
-            assert_equal(n3['i2p']['reachable'], False)
+                    expected_proxy = f"[{self.conf3.addr[0]}]:{self.conf3.addr[1]}"
+                assert_equal(n3[net]["proxy"], expected_proxy)
+                assert_equal(n3[net]["proxy_randomize_credentials"], False)
+            assert_equal(n3["onion"]["reachable"], False)
+            assert_equal(n3["i2p"]["reachable"], False)


-if __name__ == '__main__':
+if __name__ == "__main__":
     ProxyTest().main()
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -42,14 +42,13 @@
     big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
     best_block = node.getblock(node.getbestblockhash())
     height = int(best_block["height"]) + 1

-    mine_large_blocks.nTime = max(
-        mine_large_blocks.nTime, int(best_block["time"])) + 1
+    mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
     previousblockhash = int(best_block["hash"], 16)

     for _ in range(n):
         # Build the coinbase transaction (with large scriptPubKey)
         coinbase_tx = create_coinbase(height)
-        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
+        coinbase_tx.vin[0].nSequence = 2**32 - 1
         coinbase_tx.vout[0].scriptPubKey = big_script
         coinbase_tx.rehash()
@@ -58,7 +57,7 @@
         block.nVersion = best_block["version"]
         block.hashPrevBlock = previousblockhash
         block.nTime = mine_large_blocks.nTime
-        block.nBits = int('207fffff', 16)
+        block.nBits = int("207fffff", 16)
         block.nNonce = 0
         block.vtx = [coinbase_tx]
         block.hashMerkleRoot = block.calc_merkle_root()
@@ -73,8 +72,11 @@

 def calc_usage(blockdir):
-    return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir)
-               if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
+    return sum(
+        os.path.getsize(blockdir + f)
+        for f in os.listdir(blockdir)
+        if os.path.isfile(os.path.join(blockdir, f))
+    ) / (1024.0 * 1024.0)


 class PruneTest(BitcoinTestFramework):
@@ -85,19 +87,22 @@
         # Create nodes 0 and 1 to mine.
         # Create node 2 to test pruning.
-        self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000",
-                                       "-checkblocks=5", "-noparkdeepreorg"]
+        self.full_node_default_args = [
+            "-maxreceivebuffer=20000",
+            "-blockmaxsize=999000",
+            "-checkblocks=5",
+            "-noparkdeepreorg",
+        ]
         # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
         # Create nodes 5 to test wallet in prune mode, but do not connect
-        self.extra_args = [self.full_node_default_args,
-                           self.full_node_default_args,
-                           ["-maxreceivebuffer=20000", "-prune=550",
-                            "-noparkdeepreorg"],
-                           ["-maxreceivebuffer=20000", "-blockmaxsize=999000",
-                            "-noparkdeepreorg"],
-                           ["-maxreceivebuffer=20000", "-blockmaxsize=999000",
-                            "-noparkdeepreorg"],
-                           ["-prune=550"]]
+        self.extra_args = [
+            self.full_node_default_args,
+            self.full_node_default_args,
+            ["-maxreceivebuffer=20000", "-prune=550", "-noparkdeepreorg"],
+            ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg"],
+            ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg"],
+            ["-prune=550"],
+        ]
         self.rpc_timeout = 120

     def skip_test_if_missing_module(self):
@@ -106,8 +111,7 @@
     def setup_network(self):
         self.setup_nodes()

-        self.prunedir = os.path.join(
-            self.nodes[2].datadir, self.chain, 'blocks', '')
+        self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, "blocks", "")

         self.connect_nodes(0, 1)
         self.connect_nodes(1, 2)
@@ -124,7 +128,8 @@
     def create_big_chain(self):
         # Start by creating some coinbases we can spend later
         self.generate(
-            self.nodes[1], 200, sync_fun=lambda: self.sync_blocks(self.nodes[0:2]))
+            self.nodes[1], 200, sync_fun=lambda: self.sync_blocks(self.nodes[0:2])
+        )
         self.generate(self.nodes[0], 150, sync_fun=self.no_op)

         # Then mine enough full blocks to create more than 550MiB of data
@@ -133,22 +138,26 @@
         self.sync_blocks(self.nodes[0:5])

     def test_height_min(self):
-        assert os.path.isfile(os.path.join(
-            self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
+        assert os.path.isfile(
+            os.path.join(self.prunedir, "blk00000.dat")
+        ), "blk00000.dat is missing, pruning too early"
         self.log.info("Success")
-        self.log.info("Though we're already using more than 550MiB, current usage: "
-                      f"{calc_usage(self.prunedir)}")
         self.log.info(
-            "Mining 25 more blocks should cause the first block file to be pruned")
+            "Though we're already using more than 550MiB, current usage: "
+            f"{calc_usage(self.prunedir)}"
+        )
+        self.log.info(
+            "Mining 25 more blocks should cause the first block file to be pruned"
+        )

         # Pruning doesn't run until we're allocating another chunk, 20 full
         # blocks past the height cutoff will ensure this
         mine_large_blocks(self.nodes[0], 25)

         # Wait for blk00000.dat to be pruned
         self.wait_until(
-            lambda: not os.path.isfile(
-                os.path.join(self.prunedir, "blk00000.dat")),
-            timeout=30)
+            lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")),
+            timeout=30,
+        )

         self.log.info("Success")
         usage = calc_usage(self.prunedir)
@@ -158,7 +167,9 @@
     def create_chain_with_staleblocks(self):
         # Create stale blocks in manageable sized chunks
         self.log.info(
-            "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
+            "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg"
+            " from Node 0, for 12 rounds"
+        )

         for _ in range(12):
             # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
@@ -178,8 +189,10 @@
         self.connect_nodes(0, 2)
         self.sync_blocks(self.nodes[0:3])

-        self.log.info("Usage can be over target because of high stale rate: "
-                      f"{calc_usage(self.prunedir)}")
+        self.log.info(
+            "Usage can be over target because of high stale rate: "
+            f"{calc_usage(self.prunedir)}"
+        )

     def reorg_test(self):
         # Node 1 will mine a 300 block chain starting 287 blocks back from Node
@@ -219,11 +232,13 @@
         self.sync_blocks(self.nodes[0:3], timeout=120)

         self.log.info(f"Verify height on node 2: {self.nodes[2].getblockcount()}")
-        self.log.info("Usage possibly still high because of stale blocks in block "
-                      f"files: {calc_usage(self.prunedir)}")
-        self.log.info(
-            "Mine 220 more large blocks so we have requisite history")
+        self.log.info(
+            "Usage possibly still high because of stale blocks in block "
+            f"files: {calc_usage(self.prunedir)}"
+        )
+
+        self.log.info("Mine 220 more large blocks so we have requisite history")
         mine_large_blocks(self.nodes[0], 220)
         self.sync_blocks(self.nodes[0:3], timeout=120)
@@ -235,11 +249,19 @@
     def reorg_back(self):
         # Verify that a block on the old main chain fork has been pruned away
         assert_raises_rpc_error(
-            -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
-        with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
+            -1,
+            "Block not available (pruned data)",
+            self.nodes[2].getblock,
+            self.forkhash,
+        )
+        with self.nodes[2].assert_debug_log(
+            expected_msgs=[
+                "block verification stopping at height",
+                "(pruning, no data)",
+            ]
+        ):
             self.nodes[2].verifychain(checklevel=4, nblocks=0)
-        self.log.info(
-            f"Will need to redownload block {self.forkheight}")
+        self.log.info(f"Will need to redownload block {self.forkheight}")

         # Verify that we have enough history to reorg back to the fork point.
         # Although this is more than 288 blocks, because this chain was written
@@ -267,31 +289,34 @@
         blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
         self.log.info(
             "Rewind node 0 to prev main chain to mine longer chain to trigger "
-            f"redownload. Blocks needed: {blocks_to_mine}")
+            f"redownload. Blocks needed: {blocks_to_mine}"
+        )
         self.nodes[0].invalidateblock(curchainhash)
         assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
         assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
         goalbesthash = self.generate(
-            self.nodes[0], blocks_to_mine, sync_fun=self.no_op)[-1]
+            self.nodes[0], blocks_to_mine, sync_fun=self.no_op
+        )[-1]
         goalbestheight = first_reorg_height + 1

         self.log.info(
-            "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
+            "Verify node 2 reorged back to the main chain, some blocks of which it had"
+            " to redownload"
+        )
         # Wait for Node 2 to reorg to proper height
-        self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight,
-                        timeout=900)
+        self.wait_until(
+            lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900
+        )
         assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
         # Verify we can now have the data for a block previously pruned
-        assert_equal(self.nodes[2].getblock(
-            self.forkhash)["height"], self.forkheight)
+        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)

     def manual_test(self, node_number, use_timestamp):
         # at this point, node has 995 blocks and has not yet run in prune mode
         self.start_node(node_number)
         node = self.nodes[node_number]
         assert_equal(node.getblockcount(), 995)
-        assert_raises_rpc_error(-1, "not in prune mode",
-                                node.pruneblockchain, 500)
+        assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)

         # now re-start in manual pruning mode
         self.restart_node(node_number, extra_args=["-prune=1"])
@@ -300,23 +325,32 @@
         def height(index):
             if use_timestamp:
-                return node.getblockheader(node.getblockhash(index))[
-                    "time"] + TIMESTAMP_WINDOW
+                return (
+                    node.getblockheader(node.getblockhash(index))["time"]
+                    + TIMESTAMP_WINDOW
+                )
             else:
                 return index

         def prune(index):
             ret = node.pruneblockchain(height=height(index))
-            assert_equal(ret, node.getblockchaininfo()['pruneheight'])
+            assert_equal(ret, node.getblockchaininfo()["pruneheight"])

         def has_block(index):
-            return os.path.isfile(os.path.join(
-                self.nodes[node_number].datadir, self.chain, "blocks", f"blk{index:05}.dat"))
+            return os.path.isfile(
+                os.path.join(
+                    self.nodes[node_number].datadir,
+                    self.chain,
+                    "blocks",
+                    f"blk{index:05}.dat",
+                )
+            )

         # should not prune because chain tip of node 3 (995) < PruneAfterHeight
         # (1000)
         assert_raises_rpc_error(
-            -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
+            -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)
+        )

         # Save block transaction count before pruning, assert value
         block1_details = node.getblock(node.getblockhash(1))
@@ -327,48 +361,41 @@
         assert_equal(node.getblockchaininfo()["blocks"], 1001)

         # Pruned block should still know the number of transactions
-        assert_equal(node.getblockheader(node.getblockhash(1))
-                     ["nTx"], block1_details["nTx"])
+        assert_equal(
+            node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"]
+        )

         # negative heights should raise an exception
         assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)

         # height=100 too low to prune first block file so this is a no-op
         prune(100)
-        assert has_block(
-            0), "blk00000.dat is missing when should still be there"
+        assert has_block(0), "blk00000.dat is missing when should still be there"

         # Does nothing
         node.pruneblockchain(height(0))
-        assert has_block(
-            0), "blk00000.dat is missing when should still be there"
+        assert has_block(0), "blk00000.dat is missing when should still be there"

         # height=500 should prune first file
         prune(500)
-        assert not has_block(
-            0), "blk00000.dat is still there, should be pruned by now"
-        assert has_block(
-            1), "blk00001.dat is missing when should still be there"
+        assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
+        assert has_block(1), "blk00001.dat is missing when should still be there"

         # height=650 should prune second file
         prune(650)
-        assert not has_block(
-            1), "blk00001.dat is still there, should be pruned by now"
+        assert not has_block(1), "blk00001.dat is still there, should be pruned by now"

         # height=1000 should not prune anything more, because tip-288 is in
         # blk00002.dat.
         prune(1000)
-        assert has_block(
-            2), "blk00002.dat is still there, should be pruned by now"
+        assert has_block(2), "blk00002.dat is still there, should be pruned by now"

         # advance the tip so blk00002.dat and blk00003.dat can be pruned (the
         # last 288 blocks should now be in blk00004.dat)
         self.generate(node, 288, sync_fun=self.no_op)
         prune(1000)
-        assert not has_block(
-            2), "blk00002.dat is still there, should be pruned by now"
-        assert not has_block(
-            3), "blk00003.dat is still there, should be pruned by now"
+        assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
+        assert not has_block(3), "blk00003.dat is still there, should be pruned by now"

         # stop node, start back up with auto-prune at 550 MiB, make sure still
         # runs
@@ -379,8 +406,7 @@
     def wallet_test(self):
         # check that the pruning node's wallet is still in good shape
         self.log.info("Stop and start pruning node to trigger wallet rescan")
-        self.restart_node(
-            2, extra_args=["-prune=550", "-noparkdeepreorg"])
+        self.restart_node(2, extra_args=["-prune=550", "-noparkdeepreorg"])
         self.log.info("Success")

         # check that wallet loads successfully when restarting a pruned node after IBD.
@@ -389,8 +415,7 @@
         self.connect_nodes(0, 5)
         nds = [self.nodes[0], self.nodes[5]]
         self.sync_blocks(nds, wait=5, timeout=300)
-        self.restart_node(
-            5, extra_args=["-prune=550", "-noparkdeepreorg"])
+        self.restart_node(5, extra_args=["-prune=550", "-noparkdeepreorg"])
         self.log.info("Success")

     def run_test(self):
@@ -412,13 +437,17 @@
         self.stop_node(4)

         self.log.info(
-            "Check that we haven't started pruning yet because we're below PruneAfterHeight")
+            "Check that we haven't started pruning yet because we're below"
+            " PruneAfterHeight"
+        )
         self.test_height_min()
         # Extend this chain past the PruneAfterHeight
         # N0=N1=N2 **...*(1020)

         self.log.info(
-            "Check that we'll exceed disk space target if we have a very high stale block rate")
+            "Check that we'll exceed disk space target if we have a very high stale"
+            " block rate"
+        )
         self.create_chain_with_staleblocks()
         # Disconnect N0
         # And mine a 24 block chain on N1 and a separate 25 block chain on N0
@@ -476,7 +505,9 @@
         # *...**(1320)

         self.log.info(
-            "Test that we can rerequest a block we previously pruned if needed for a reorg")
+            "Test that we can rerequest a block we previously pruned if needed for a"
+            " reorg"
+        )
         self.reorg_back()
         # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
         # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
@@ -508,5 +539,5 @@
         self.log.info("Done")


-if __name__ == '__main__':
+if __name__ == "__main__":
     PruneTest().main()
diff --git a/test/functional/feature_reindex.py b/test/functional/feature_reindex.py
--- a/test/functional/feature_reindex.py
+++ b/test/functional/feature_reindex.py
@@ -14,18 +14,17 @@

 class ReindexTest(BitcoinTestFramework):
-
     def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 1

     def reindex(self, justchainstate=False):
-        self.generatetoaddress(self.nodes[0],
-                               3, self.nodes[0].get_deterministic_priv_key().address)
+        self.generatetoaddress(
+            self.nodes[0], 3, self.nodes[0].get_deterministic_priv_key().address
+        )
         blockcount = self.nodes[0].getblockcount()
         self.stop_nodes()
-        extra_args = [
-            ["-reindex-chainstate" if justchainstate else "-reindex"]]
+        extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
         self.start_nodes(extra_args)
         # start_node is blocking on reindex
         assert_equal(self.nodes[0].getblockcount(), blockcount)
@@ -38,5 +37,5 @@
         self.reindex(True)


-if __name__ == '__main__':
+if __name__ == "__main__":
     ReindexTest().main()
diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py
--- a/test/functional/feature_settings.py
+++ b/test/functional/feature_settings.py
@@ -19,7 +19,7 @@
         self.wallet_names = []

     def run_test(self):
-        node, = self.nodes
+        (node,) = self.nodes
         settings = Path(node.datadir, self.chain, "settings.json")
         conf = Path(node.datadir, "bitcoin.conf")
@@ -30,20 +30,30 @@

         # Assert settings are parsed and logged
         with settings.open("w") as fp:
-            json.dump({"string": "string", "num": 5, "bool": True,
-                       "null": None, "list": [6, 7]}, fp)
-        with node.assert_debug_log(expected_msgs=[
-            'Ignoring unknown rw_settings value bool',
-            'Ignoring unknown rw_settings value list',
-            'Ignoring unknown rw_settings value null',
-            'Ignoring unknown rw_settings value num',
-            'Ignoring unknown rw_settings value string',
+            json.dump(
+                {
+                    "string": "string",
+                    "num": 5,
+                    "bool": True,
+                    "null": None,
+                    "list": [6, 7],
+                },
+                fp,
+            )
+        with node.assert_debug_log(
+            expected_msgs=[
+                "Ignoring unknown rw_settings value bool",
+                "Ignoring unknown rw_settings value list",
+                "Ignoring unknown rw_settings value null",
+                "Ignoring unknown rw_settings value num",
+                "Ignoring unknown rw_settings value string",
                 'Setting file arg: string = "string"',
-            'Setting file arg: num = 5',
-            'Setting file arg: bool = true',
-            'Setting file arg: null = null',
-            'Setting file arg: list = [6,7]',
-        ]):
+                "Setting file arg: num = 5",
+                "Setting file arg: bool = true",
+                "Setting file arg: null = null",
+                "Setting file arg: list = [6,7]",
+            ]
+        ):
             self.start_node(0)
         self.stop_node(0)
@@ -51,39 +61,49 @@
         with settings.open() as fp:
             assert_equal(
                 json.load(fp),
-                {"string": "string", "num": 5, "bool": True, "null": None,
-                 "list": [6, 7]})
+                {
+                    "string": "string",
+                    "num": 5,
+                    "bool": True,
+                    "null": None,
+                    "list": [6, 7],
+                },
+            )

         # Test invalid json
         with settings.open("w") as fp:
             fp.write("invalid json")
         node.assert_start_raises_init_error(
-            expected_msg='Unable to parse settings file',
-            match=ErrorMatch.PARTIAL_REGEX)
+            expected_msg="Unable to parse settings file", match=ErrorMatch.PARTIAL_REGEX
+        )

         # Test invalid json object
         with settings.open("w") as fp:
             fp.write('"string"')
         node.assert_start_raises_init_error(
             expected_msg='Found non-object value "string" in settings file',
-            match=ErrorMatch.PARTIAL_REGEX)
+            match=ErrorMatch.PARTIAL_REGEX,
+        )

         # Test invalid settings file containing duplicate keys
         with settings.open("w") as fp:
             fp.write('{"key": 1, "key": 2}')
         node.assert_start_raises_init_error(
-            expected_msg='Found duplicate key key in settings file',
-            match=ErrorMatch.PARTIAL_REGEX)
+            expected_msg="Found duplicate key key in settings file",
+            match=ErrorMatch.PARTIAL_REGEX,
+        )

         # Test invalid settings file is ignored with command line -nosettings
-        with node.assert_debug_log(expected_msgs=['Command-line arg: settings=false']):
+        with node.assert_debug_log(expected_msgs=["Command-line arg: settings=false"]):
             self.start_node(0, extra_args=["-nosettings"])
         self.stop_node(0)

         # Test invalid settings file is ignored with config file -nosettings
-        with conf.open('a') as conf:
-            conf.write('nosettings=1\n')
-        with node.assert_debug_log(expected_msgs=['Config file arg: [regtest] settings=false']):
+        with conf.open("a") as conf:
+            conf.write("nosettings=1\n")
+        with node.assert_debug_log(
+            expected_msgs=["Config file arg: [regtest] settings=false"]
+        ):
             self.start_node(0)
         self.stop_node(0)
@@ -96,5 +116,5 @@
         self.stop_node(0)


-if __name__ == '__main__':
+if __name__ == "__main__":
     SettingsTest().main()
diff --git a/test/functional/feature_shutdown.py b/test/functional/feature_shutdown.py
--- a/test/functional/feature_shutdown.py
+++ b/test/functional/feature_shutdown.py
@@ -12,11 +12,10 @@

 def test_long_call(node):
     block = node.waitfornewblock()
-    assert_equal(block['height'], 0)
+    assert_equal(block["height"], 0)


 class ShutdownTest(BitcoinTestFramework):
-
     def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 1
@@ -24,18 +23,18 @@

     def run_test(self):
         node = get_rpc_proxy(
-            self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
+            self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir
+        )
         # Force connection establishment by executing a dummy command.
         node.getblockcount()
         Thread(target=test_long_call, args=(node,)).start()

         # Wait until the server is executing the above `waitfornewblock`.
-        self.wait_until(
-            lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
+        self.wait_until(lambda: len(self.nodes[0].getrpcinfo()["active_commands"]) == 2)

         # Wait 1 second after requesting shutdown but not before the `stop` call
         # finishes. This is to ensure event loop waits for current connections
         # to close.
         self.stop_node(0, wait=1000)


-if __name__ == '__main__':
+if __name__ == "__main__":
     ShutdownTest().main()
diff --git a/test/functional/feature_tx_version.py b/test/functional/feature_tx_version.py
--- a/test/functional/feature_tx_version.py
+++ b/test/functional/feature_tx_version.py
@@ -24,7 +24,7 @@
 from test_framework.util import assert_equal, assert_greater_than_or_equal

 OK_VERSIONS = [1, 2]
-BAD_VERSIONS = [-0x80000000, -0x7fffffff, -2, -1, 0, 3, 7, 0x100, 0x7fffffff]
+BAD_VERSIONS = [-0x80000000, -0x7FFFFFFF, -2, -1, 0, 3, 7, 0x100, 0x7FFFFFFF]

 START_TIME = 1_900_000_000
 ACTIVATION_TIME = 2_000_000_000
@@ -34,9 +34,13 @@
     def set_test_params(self):
         self.num_nodes = 1
         self.setup_clean_chain = True
-        self.extra_args = [[f'-wellingtonactivationtime={ACTIVATION_TIME}',
-                            '-acceptnonstdtxn=0',
-                            '-whitelist=127.0.0.1']]
+        self.extra_args = [
+            [
+                f"-wellingtonactivationtime={ACTIVATION_TIME}",
+                "-acceptnonstdtxn=0",
+                "-whitelist=127.0.0.1",
+            ]
+        ]

     def run_test(self):
         self.block_heights = {}
@@ -71,10 +75,8 @@
                 tx = self.make_tx(spendable_tx, nVersion=bad_version)
                 bad_version_txs.append(tx)
                 peer.send_txs_and_test(
-                    [tx],
-                    node,
-                    success=False,
-                    reject_reason="was not accepted: version")
+                    [tx], node, success=False, reject_reason="was not accepted: version"
+                )
             return bad_version_txs

         self.log.info("These are always OK for the mempool")
@@ -84,25 +86,24 @@
         bad_version_txs = test_mempool_rejects_bad_versions()

         self.log.info(
-            "Before Wellington, we CAN mine blocks with txs with bad versions")
+            "Before Wellington, we CAN mine blocks with txs with bad versions"
+        )
         block = self.make_block(blocks[-1], txs=bad_version_txs)
         peer.send_blocks_and_test([block], node, success=True)
         blocks.append(block)

         self.log.info(
-            "Before Wellington, we CAN mine blocks with a coinbase with a bad "
-            "version")
+            "Before Wellington, we CAN mine blocks with a coinbase with a bad version"
+        )
         for bad_version in BAD_VERSIONS:
             block = self.make_block(blocks[-1], coinbase_version=bad_version)
             peer.send_blocks_and_test([block], node, success=True)
             blocks.append(block)

-        self.log.info(
-            "Activate Wellington, mine 6 blocks starting at ACTIVATION_TIME")
+        self.log.info("Activate Wellington, mine 6 blocks starting at ACTIVATION_TIME")
         node.setmocktime(ACTIVATION_TIME)
         for offset in range(0, 6):
-            block = self.make_block(
-                blocks[-1], nTime=ACTIVATION_TIME + offset)
+            block = self.make_block(blocks[-1], nTime=ACTIVATION_TIME + offset)
             peer.send_blocks_and_test([block], node, success=True)
             blocks.append(block)
@@ -117,40 +118,44 @@
         bad_version_txs = test_mempool_rejects_bad_versions()

         self.log.info(
-            "After activation, we CANNOT mine blocks with txs with bad "
-            "versions anymore")
+            "After activation, we CANNOT mine blocks with txs with bad versions anymore"
+        )
         for bad_tx in bad_version_txs:
             block = self.make_block(blocks[-1], txs=[bad_tx])
             peer.send_blocks_and_test(
-                [block],
-                node,
-                success=False,
-                reject_reason="bad-txns-version")
+                [block], node, success=False, reject_reason="bad-txns-version"
+            )

         self.log.info(
             "After activation, we CANNOT mine blocks with a coinbase with a "
-            "bad version anymore")
+            "bad version anymore"
+        )
         for bad_version in BAD_VERSIONS:
             block = self.make_block(blocks[-1], coinbase_version=bad_version)
             peer.send_blocks_and_test(
-                [block],
-                node,
-                success=False,
-                reject_reason="bad-txns-version")
+                [block], node, success=False, reject_reason="bad-txns-version"
+            )

     def make_tx(self, spend_tx, nVersion):
         value = spend_tx.vout[0].nValue - 1000
         assert_greater_than_or_equal(value, 546)
         tx = create_tx_with_script(
-            spend_tx, 0, amount=value, script_pub_key=P2SH_OP_TRUE)
+            spend_tx, 0, amount=value, script_pub_key=P2SH_OP_TRUE
+        )
         tx.nVersion = nVersion
         tx.vin[0].scriptSig = SCRIPTSIG_OP_TRUE
         pad_tx(tx)
         tx.rehash()
         return tx

-    def make_block(self, prev_block: CBlock, *, nTime: Optional[int] = None,
-                   coinbase_version=None, txs=None) -> CBlock:
+    def make_block(
+        self,
+        prev_block: CBlock,
+        *,
+        nTime: Optional[int] = None,
+        coinbase_version=None,
+        txs=None,
+    ) -> CBlock:
         if prev_block.sha256 is None:
             prev_block.rehash()
         assert prev_block.sha256 is not None
@@ -173,5 +178,5 @@
         return block


-if __name__ == '__main__':
+if __name__ == "__main__":
     TxVersionTest().main()
diff --git a/test/functional/feature_uaclient.py b/test/functional/feature_uaclient.py
--- a/test/functional/feature_uaclient.py
+++ b/test/functional/feature_uaclient.py
@@ -20,75 +20,104 @@
         self.log.info("test -uaclientname and -uaclientversion")
         default_useragent = self.nodes[0].getnetworkinfo()["subversion"]
         expected = "/Bitcoin ABC:"
-        assert_equal(default_useragent[:len(expected)], expected)
-        default_version = default_useragent[default_useragent.index(':') + 1:]
-        default_version = default_version[:default_version.index('/')]
+        assert_equal(default_useragent[: len(expected)], expected)
+        default_version = default_useragent[default_useragent.index(":") + 1 :]
+        default_version = default_version[: default_version.index("/")]

         self.restart_node(0, ["-uaclientname=Foo Client"])
         foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
         expected = f"/Foo Client:{default_version}"
-        assert_equal(foo_ua[:len(expected)], expected)
+        assert_equal(foo_ua[: len(expected)], expected)

         self.restart_node(0, ["-uaclientversion=123.45"])
         foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
         expected = "/Bitcoin ABC:123.45"
-        assert_equal(foo_ua[:len(expected)], expected)
+        assert_equal(foo_ua[: len(expected)], expected)

-        self.log.info(
-            "non-numeric version allowed (although not recommended in BIP14)")
+        self.log.info("non-numeric version allowed (although not recommended in BIP14)")
         self.restart_node(0, ["-uaclientversion=Version Two"])
         foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
         expected = "/Bitcoin ABC:Version Two"
-        assert_equal(foo_ua[:len(expected)], expected)
+        assert_equal(foo_ua[: len(expected)], expected)

         self.log.info("test -uaclient doesn't break -uacomment")
-        self.restart_node(0, ["-uaclientname=Bar Client",
-                              "-uaclientversion=3000",
-                              "-uacomment=spam bacon and eggs"])
+        self.restart_node(
+            0,
+            [
+                "-uaclientname=Bar Client",
+                "-uaclientversion=3000",
+                "-uacomment=spam bacon and eggs",
+            ],
+        )
         bar_ua = self.nodes[0].getnetworkinfo()["subversion"]
         expected = "/Bar Client:3000"
-        assert_equal(bar_ua[:len(expected)], expected)
+        assert_equal(bar_ua[: len(expected)], expected)
         assert "spam bacon and eggs" in bar_ua

         self.log.info("test -uaclientname max length")
         self.stop_node(0)
-        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \([0-9]+\)\. Reduce the number or size of uacomments\."
+        expected = (
+            r"Error: Total length of network version string \([0-9]+\) exceeds maximum"
+            r" length \([0-9]+\)\. Reduce the number or size of uacomments\."
+        )
         self.nodes[0].assert_start_raises_init_error(
-            [f"-uaclientname={'a' * 256}"], expected, match=ErrorMatch.FULL_REGEX)
+            [f"-uaclientname={'a' * 256}"], expected, match=ErrorMatch.FULL_REGEX
+        )

         self.log.info("test -uaclientversion max length")
-        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \([0-9]+\)\. Reduce the number or size of uacomments\."
+        expected = (
+            r"Error: Total length of network version string \([0-9]+\) exceeds maximum"
+            r" length \([0-9]+\)\. Reduce the number or size of uacomments\."
+        )
         self.nodes[0].assert_start_raises_init_error(
-            [f"-uaclientversion={'a' * 256}"], expected, match=ErrorMatch.FULL_REGEX)
+            [f"-uaclientversion={'a' * 256}"], expected, match=ErrorMatch.FULL_REGEX
+        )

         self.log.info("test -uaclientname and -uaclientversion max length")
-        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \([0-9]+\)\. Reduce the number or size of uacomments\."
+        expected = (
+            r"Error: Total length of network version string \([0-9]+\) exceeds maximum"
+            r" length \([0-9]+\)\. Reduce the number or size of uacomments\."
+        )
         self.nodes[0].assert_start_raises_init_error(
-            [f"-uaclientname={'a' * 128}", f"-uaclientversion={'a' * 128}"], expected, match=ErrorMatch.FULL_REGEX)
+            [f"-uaclientname={'a' * 128}", f"-uaclientversion={'a' * 128}"],
+            expected,
+            match=ErrorMatch.FULL_REGEX,
+        )

-        self.log.info(
-            "test -uaclientname and -uaclientversion invalid characters")
-        for invalid_char in ['/', ':', '(', ')', '*', '!', '₿', '🏃']:
+        self.log.info("test -uaclientname and -uaclientversion invalid characters")
+        for invalid_char in ["/", ":", "(", ")", "*", "!", "₿", "🏃"]:
             # for client name
-            expected = r"Error: -uaclientname \(" + \
-                re.escape(invalid_char) + r"\) contains invalid characters\."
+            expected = (
+                r"Error: -uaclientname \("
+                + re.escape(invalid_char)
+                + r"\) contains invalid characters\."
+            )
             self.nodes[0].assert_start_raises_init_error(
-                [f"-uaclientname={invalid_char}"],
-                expected, match=ErrorMatch.FULL_REGEX)
+                [f"-uaclientname={invalid_char}"], expected, match=ErrorMatch.FULL_REGEX
+            )

             # for client version
-            expected = r"Error: -uaclientversion \(" + \
-                re.escape(invalid_char) + r"\) contains invalid characters\."
+            expected = (
+                r"Error: -uaclientversion \("
+                + re.escape(invalid_char)
+                + r"\) contains invalid characters\."
+            )
             self.nodes[0].assert_start_raises_init_error(
                 [f"-uaclientversion={invalid_char}"],
-                expected, match=ErrorMatch.FULL_REGEX)
+                expected,
+                match=ErrorMatch.FULL_REGEX,
+            )

             # for both
-            expected = r"Error: -uaclientname \(" + \
-                re.escape(invalid_char) + r"\) contains invalid characters\."
+            expected = (
+                r"Error: -uaclientname \("
+                + re.escape(invalid_char)
+                + r"\) contains invalid characters\."
+            )
             self.nodes[0].assert_start_raises_init_error(
-                [f"-uaclientname={invalid_char}",
-                 f"-uaclientversion={invalid_char}"],
-                expected, match=ErrorMatch.FULL_REGEX)
+                [f"-uaclientname={invalid_char}", f"-uaclientversion={invalid_char}"],
+                expected,
+                match=ErrorMatch.FULL_REGEX,
+            )


-if __name__ == '__main__':
+if __name__ == "__main__":
     UseragentTest().main()
diff --git a/test/functional/feature_uacomment.py b/test/functional/feature_uacomment.py
--- a/test/functional/feature_uacomment.py
+++ b/test/functional/feature_uacomment.py
@@ -27,17 +27,25 @@

         self.log.info("test -uacomment max length")
         self.stop_node(0)
-        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
+        expected = (
+            r"Error: Total length of network version string \([0-9]+\) exceeds maximum"
+            r" length \(256\). Reduce the number or size of uacomments."
+        )
         self.nodes[0].assert_start_raises_init_error(
-            [f"-uacomment={'a' * 256}"], expected, match=ErrorMatch.FULL_REGEX)
+            [f"-uacomment={'a' * 256}"], expected, match=ErrorMatch.FULL_REGEX
+        )

         self.log.info("test -uacomment unsafe characters")
-        for unsafe_char in ['/', ':', '(', ')', '₿', '🏃']:
-            expected = r"Error: User Agent comment \(" + re.escape(
-                unsafe_char) + r"\) contains unsafe characters."
+        for unsafe_char in ["/", ":", "(", ")", "₿", "🏃"]:
+            expected = (
+                r"Error: User Agent comment \("
+                + re.escape(unsafe_char)
+                + r"\) contains unsafe characters."
+            )
             self.nodes[0].assert_start_raises_init_error(
-                [f"-uacomment={unsafe_char}"], expected, match=ErrorMatch.FULL_REGEX)
+                [f"-uacomment={unsafe_char}"], expected, match=ErrorMatch.FULL_REGEX
+            )


-if __name__ == '__main__':
+if __name__ == "__main__":
     UacommentTest().main()
diff --git a/test/functional/feature_utxo_set_hash.py b/test/functional/feature_utxo_set_hash.py
--- a/test/functional/feature_utxo_set_hash.py
+++ b/test/functional/feature_utxo_set_hash.py
@@ -27,11 +27,13 @@
         # These depend on the setup_clean_chain option, the chain loaded from
         # the cache
         assert_equal(
-            self.nodes[0].gettxoutsetinfo()['hash_serialized'],
-            "b32ec1dda5a53cd025b95387aad344a801825fe46a60ff952ce26528f01d3be8")
+            self.nodes[0].gettxoutsetinfo()["hash_serialized"],
+            "b32ec1dda5a53cd025b95387aad344a801825fe46a60ff952ce26528f01d3be8",
+        )
         assert_equal(
-            self.nodes[0].gettxoutsetinfo("muhash")['muhash'],
-            "dd5ad2a105c2d29495f577245c357409002329b9f4d6182c0af3dc2f462555c8")
+            self.nodes[0].gettxoutsetinfo("muhash")["muhash"],
+            "dd5ad2a105c2d29495f577245c357409002329b9f4d6182c0af3dc2f462555c8",
+        )

     def test_muhash_implementation(self):
         self.log.info("Test MuHash implementation consistency")
@@ -42,21 +44,20 @@
         # coinbase
         block_hashes = self.generate(node, 100)
         blocks = [
-            FromHex(CBlock(), node.getblock(block, False)) for block in block_hashes]
+            FromHex(CBlock(), node.getblock(block, False)) for block in block_hashes
+        ]
         spending = blocks.pop(0)

         # Create a spending transaction and mine a block which includes it
         tx = create_transaction(
-            node, spending.vtx[0].rehash(), node.getnewaddress(),
-            amount=49_000_000)
-        txid = node.sendrawtransaction(
-            hexstring=tx.serialize().hex(), maxfeerate=0)
+            node, spending.vtx[0].rehash(), node.getnewaddress(), amount=49_000_000
+        )
+        txid = node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)

-        tx_block = self.generateblock(node,
-                                      output=node.getnewaddress(),
-                                      transactions=[txid])
-        blocks.append(
-            FromHex(CBlock(), node.getblock(tx_block['hash'], False)))
+        tx_block = self.generateblock(
+            node, output=node.getnewaddress(), transactions=[txid]
+        )
+        blocks.append(FromHex(CBlock(), node.getblock(tx_block["hash"], False)))

         # Serialize the outputs that should be in the UTXO set and add them to
         # a MuHash object
@@ -78,7 +79,7 @@
             muhash.insert(data)

         finalized = muhash.digest()
-        node_muhash = node.gettxoutsetinfo("muhash")['muhash']
+        node_muhash = node.gettxoutsetinfo("muhash")["muhash"]

         assert_equal(finalized[::-1].hex(), node_muhash)
@@ -87,5 +88,5 @@
         self.test_muhash_implementation()


-if __name__ == '__main__':
+if __name__ == "__main__":
     UTXOSetHashTest().main()