diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -89,11 +89,12 @@
             if current_height != last_height:
                 last_height = current_height
                 if timeout < 0:
-                    assert False, "blockchain too short after timeout: %d" % current_height
+                    assert False, "blockchain too short after timeout: {}".format(
+                        current_height)
                 timeout - 0.25
                 continue
             elif current_height > height:
-                assert False, "blockchain too long: %d" % current_height
+                assert False, "blockchain too long: {}".format(current_height)
             elif current_height == height:
                 break
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -78,7 +78,7 @@
         # wait_for_verack ensures that the P2P connection is fully up.
         self.nodes[0].p2p.wait_for_verack()
-        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
+        self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2))
         self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
         self.nodeaddress = self.nodes[0].getnewaddress()
diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -102,7 +102,7 @@
         # TODO: If this happens a lot, we should try to restart without -dbcrashratio
         # and make sure that recovery happens.
         raise AssertionError(
-            "Unable to successfully restart node %d in allotted time", node_index)
+            "Unable to successfully restart node {} in allotted time".format(node_index))
     def submit_block_catch_error(self, node_index, block):
         """Try submitting a block to the given node.
@@ -117,17 +117,17 @@
             # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error.
             if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''":
                 self.log.debug(
-                    "node %d submitblock raised exception: %s", node_index, e)
+                    "node {} submitblock raised exception: {}".format(node_index, e))
                 return False
             else:
                 raise
         except tuple(HTTP_DISCONNECT_ERRORS) as e:
             self.log.debug(
-                "node %d submitblock raised exception: %s", node_index, e)
+                "node {} submitblock raised exception: {}".format(node_index, e))
             return False
         except OSError as e:
             self.log.debug(
-                "node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
+                "node {} submitblock raised OSError exception: errno={}".format(node_index, e.errno))
             if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
                 # The node has likely crashed
                 return False
@@ -153,16 +153,16 @@
         # Deliver each block to each other node
         for i in range(3):
             nodei_utxo_hash = None
-            self.log.debug("Syncing blocks to node %d", i)
+            self.log.debug("Syncing blocks to node {}".format(i))
             for (block_hash, block) in blocks:
                 # Get the block from node3, and submit to node_i
-                self.log.debug("submitting block %s", block_hash)
+                self.log.debug("submitting block {}".format(block_hash))
                 if not self.submit_block_catch_error(i, block):
                     # TODO: more carefully check that the crash is due to -dbcrashratio
                     # (change the exit code perhaps, and check that here?)
                     self.wait_for_node_exit(i, timeout=30)
                     self.log.debug(
-                        "Restarting node %d after block hash %s", i, block_hash)
+                        "Restarting node {} after block hash {}".format(i, block_hash))
                     nodei_utxo_hash = self.restart_node(i, block_hash)
                     assert nodei_utxo_hash is not None
                     self.restart_counts[i] += 1
@@ -179,7 +179,8 @@
             # - we only update the utxo cache after a node restart, since flushing
             # the cache is a no-op at that point
             if nodei_utxo_hash is not None:
-                self.log.debug("Checking txoutsetinfo matches for node %d", i)
+                self.log.debug(
+                    "Checking txoutsetinfo matches for node {}".format(i))
                 assert_equal(nodei_utxo_hash, node3_utxo_hash)
     def verify_utxo_hash(self):
@@ -234,15 +235,15 @@
         # Start by creating a lot of utxos on node3
         initial_height = self.nodes[3].getblockcount()
         utxo_list = create_confirmed_utxos(self.nodes[3], 5000)
-        self.log.info("Prepped %d utxo entries", len(utxo_list))
+        self.log.info("Prepped {} utxo entries".format(len(utxo_list)))
         # Sync these blocks with the other nodes
         block_hashes_to_sync = []
         for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
             block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
-        self.log.debug("Syncing %d blocks with other nodes",
-                       len(block_hashes_to_sync))
+        self.log.debug("Syncing {} blocks with other nodes".format(
+            len(block_hashes_to_sync)))
         # Syncing the blocks could cause nodes to crash, so the test begins here.
         self.sync_node3blocks(block_hashes_to_sync)
@@ -253,20 +254,21 @@
         # and then either mine a single new block on the tip, or some-sized reorg.
         for i in range(40):
             self.log.info(
-                "Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
+                "Iteration {}, generating 2500 transactions {}".format(
+                    i, self.restart_counts))
             # Generate a bunch of small-ish transactions
             self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
             # Pick a random block between current tip, and starting tip
             current_height = self.nodes[3].getblockcount()
             random_height = random.randint(starting_tip_height, current_height)
-            self.log.debug("At height %d, considering height %d",
-                           current_height, random_height)
+            self.log.debug("At height {}, considering height {}".format(
+                current_height, random_height))
             if random_height > starting_tip_height:
                 # Randomly reorg from this point with some probability (1/4 for
                 # tip, 1/5 for tip-1, ...)
                 if random.random() < 1.0 / (current_height + 4 - random_height):
                     self.log.debug(
-                        "Invalidating block at height %d", random_height)
+                        "Invalidating block at height {}".format(random_height))
                     self.nodes[3].invalidateblock(
                         self.nodes[3].getblockhash(random_height))
@@ -276,10 +278,11 @@
             while current_height + 1 > self.nodes[3].getblockcount():
                 block_hashes.extend(self.nodes[3].generate(
                     min(10, current_height + 1 - self.nodes[3].getblockcount())))
-            self.log.debug("Syncing %d new blocks...", len(block_hashes))
+            self.log.debug(
+                "Syncing {} new blocks...".format(len(block_hashes)))
             self.sync_node3blocks(block_hashes)
             utxo_list = self.nodes[3].listunspent()
-            self.log.debug("Node3 utxo count: %d", len(utxo_list))
+            self.log.debug("Node3 utxo count: {}".format(len(utxo_list)))
         # Check that the utxo hashes agree with node3
         # Useful side effect: each utxo cache gets flushed here, so that we
@@ -287,8 +290,8 @@
         self.verify_utxo_hash()
         # Check the test coverage
-        self.log.info("Restarted nodes: %s; crashes on restart: %d",
-                      self.restart_counts, self.crashed_on_restart)
+        self.log.info("Restarted nodes: {}; crashes on restart: {}".format(
+            self.restart_counts, self.crashed_on_restart))
         # If no nodes were restarted, we didn't test anything.
         assert self.restart_counts != [0, 0, 0]
@@ -299,7 +302,8 @@
         # Warn if any of the nodes escaped restart.
         for i in range(3):
             if self.restart_counts[i] == 0:
-                self.log.warn("Node %d never crashed during utxo flush!", i)
+                self.log.warn(
+                    "Node {} never crashed during utxo flush!".format(i))
 if __name__ == "__main__":
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -63,7 +63,7 @@
         # wait_for_verack ensures that the P2P connection is fully up.
         self.nodes[0].p2p.wait_for_verack()
-        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 1)
+        self.log.info("Mining {} blocks".format(DERSIG_HEIGHT - 1))
         self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 1)
         self.nodeaddress = self.nodes[0].getnewaddress()
diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py
--- a/test/functional/feature_minchainwork.py
+++ b/test/functional/feature_minchainwork.py
@@ -47,17 +47,19 @@
         # minchainwork is exceeded
         starting_chain_work = REGTEST_WORK_PER_BLOCK  # Genesis block's work
         self.log.info(
-            "Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
+            "Testing relay across node {} (minChainWork = {})".format(
+                1, self.node_min_work[1]))
         starting_blockcount = self.nodes[2].getblockcount()
         num_blocks_to_generate = int(
             (self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
-        self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
+        self.log.info("Generating {} blocks on node0".format(
+            num_blocks_to_generate))
         hashes = self.nodes[0].generate(num_blocks_to_generate)
-        self.log.info("Node0 current chain work: %s",
-                      self.nodes[0].getblockheader(hashes[-1])['chainwork'])
+        self.log.info("Node0 current chain work: {}".format(
+            self.nodes[0].getblockheader(hashes[-1])['chainwork']))
         # Sleep a few seconds and verify that node2 didn't get any new blocks
         # or headers.  We sleep, rather than sync_blocks(node0, node1) because
@@ -66,8 +68,8 @@
         time.sleep(3)
         self.log.info("Verifying node 2 has no more blocks than before")
-        self.log.info("Blockcounts: %s", [
-                      n.getblockcount() for n in self.nodes])
+        self.log.info("Blockcounts: {}".format(
+            [n.getblockcount() for n in self.nodes]))
         # Node2 shouldn't have any new headers yet, because node1 should not
         # have relayed anything.
         assert_equal(len(self.nodes[2].getchaintips()), 1)
@@ -89,8 +91,8 @@
         # continue the test.
         self.sync_all()
-        self.log.info("Blockcounts: %s", [
-                      n.getblockcount() for n in self.nodes])
+        self.log.info("Blockcounts: {}".format(
+            [n.getblockcount() for n in self.nodes]))
 if __name__ == '__main__':
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -9,7 +9,7 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import assert_equal, wait_until, connect_nodes_bi
-FORK_WARNING_MESSAGE = "Warning: Large-work fork detected, forking after block %s\n"
+FORK_WARNING_MESSAGE = "Warning: Large-work fork detected, forking after block {}\n"
 class NotificationsTest(BitcoinTestFramework):
@@ -25,11 +25,12 @@
         # -alertnotify and -blocknotify on node0, walletnotify on node1
         self.extra_args = [["-blockversion=2",
-                            "-alertnotify=echo %%s >> %s" % self.alert_filename,
-                            "-blocknotify=echo %%s >> %s" % self.block_filename],
+                            "-alertnotify=echo %s >> {}".format(
+                                self.alert_filename),
+                            "-blocknotify=echo %s >> {}".format(self.block_filename)],
                            ["-blockversion=211",
                             "-rescan",
-                            "-walletnotify=echo %%s >> %s" % self.tx_filename]]
+                            "-walletnotify=echo %s >> {}".format(self.tx_filename)]]
         super().setup_network()
     def run_test(self):
@@ -111,7 +112,7 @@
         self.log.info(self.alert_filename)
         with open(self.alert_filename, 'r', encoding='utf8') as f:
-            assert_equal(f.read(), (FORK_WARNING_MESSAGE % fork_block))
+            assert_equal(f.read(), (FORK_WARNING_MESSAGE.format(fork_block)))
 if __name__ == '__main__':
diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py
--- a/test/functional/feature_proxy.py
+++ b/test/functional/feature_proxy.py
@@ -83,17 +83,20 @@
         # this is because the proxy to use is based on CService.GetNetwork(),
         # which return NET_UNROUTABLE for localhost
         args = [
-            ['-listen', '-proxy=%s:%i' %
-                (self.conf1.addr), '-proxyrandomize=1'],
-            ['-listen', '-proxy=%s:%i' %
-                (self.conf1.addr), '-onion=%s:%i' % (self.conf2.addr), '-proxyrandomize=0'],
-            ['-listen', '-proxy=%s:%i' %
-                (self.conf2.addr), '-proxyrandomize=1'],
+            ['-listen', '-proxy={}:{}'.format(
+                self.conf1.addr[0], self.conf1.addr[1]), '-proxyrandomize=1'],
+            ['-listen', '-proxy={}:{}'.format(
+                self.conf1.addr[0], self.conf1.addr[1]),
+                '-onion={}:{}'.format(
+                    self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=0'],
+            ['-listen', '-proxy={}:{}'.format(
+                self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=1'],
             []
         ]
         if self.have_ipv6:
-            args[3] = ['-listen', '-proxy=[%s]:%i' %
-                (self.conf3.addr), '-proxyrandomize=0', '-noonion']
+            args[3] = ['-listen', '-proxy=[{}]:{}'.format(
+                self.conf3.addr[0], self.conf3.addr[1]), '-proxyrandomize=0',
+                '-noonion']
         self.add_nodes(self.num_nodes, extra_args=args)
         self.start_nodes()
@@ -187,28 +190,33 @@
         # test RPC getnetworkinfo
         n0 = networks_dict(self.nodes[0].getnetworkinfo())
         for net in ['ipv4', 'ipv6', 'onion']:
-            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
+            assert_equal(n0[net]['proxy'], '{}:{}'.format(
+                self.conf1.addr[0], self.conf1.addr[1]))
             assert_equal(n0[net]['proxy_randomize_credentials'], True)
         assert_equal(n0['onion']['reachable'], True)
         n1 = networks_dict(self.nodes[1].getnetworkinfo())
         for net in ['ipv4', 'ipv6']:
-            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
+            assert_equal(n1[net]['proxy'], '{}:{}'.format(
+                self.conf1.addr[0], self.conf1.addr[1]))
             assert_equal(n1[net]['proxy_randomize_credentials'], False)
-        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
+        assert_equal(n1['onion']['proxy'], '{}:{}'.format(
+            self.conf2.addr[0], self.conf2.addr[1]))
         assert_equal(n1['onion']['proxy_randomize_credentials'], False)
         assert_equal(n1['onion']['reachable'], True)
         n2 = networks_dict(self.nodes[2].getnetworkinfo())
         for net in ['ipv4', 'ipv6', 'onion']:
-            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
+            assert_equal(n2[net]['proxy'], '{}:{}'.format(
+                self.conf2.addr[0], self.conf2.addr[1]))
             assert_equal(n2[net]['proxy_randomize_credentials'], True)
         assert_equal(n2['onion']['reachable'], True)
         if self.have_ipv6:
             n3 = networks_dict(self.nodes[3].getnetworkinfo())
             for net in ['ipv4', 'ipv6']:
-                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
+                assert_equal(n3[net]['proxy'], '[{}]:{}'.format(
+                    self.conf3.addr[0], self.conf3.addr[1]))
                 assert_equal(n3[net]['proxy_randomize_credentials'], False)
             assert_equal(n3['onion']['reachable'], False)
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -84,8 +84,8 @@
         if not os.path.isfile(self.prunedir + "blk00000.dat"):
             raise AssertionError("blk00000.dat is missing, pruning too early")
         self.log.info("Success")
-        self.log.info("Though we're already using more than 550MiB, current usage: %d" %
-                      calc_usage(self.prunedir))
+        self.log.info("Though we're already using more than 550MiB, current usage: {}".format(
+            calc_usage(self.prunedir)))
         self.log.info(
             "Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full
@@ -102,7 +102,7 @@
         self.log.info("Success")
         usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target: %d" % usage)
+        self.log.info("Usage should be below target: {}".format(usage))
         if (usage > 550):
             raise AssertionError("Pruning target not being met")
@@ -139,8 +139,8 @@
         connect_nodes(self.nodes[2], self.nodes[0])
         sync_blocks(self.nodes[0:3])
-        self.log.info("Usage can be over target because of high stale rate: %d" %
-                      calc_usage(self.prunedir))
+        self.log.info("Usage can be over target because of high stale rate: {}".format(
+            calc_usage(self.prunedir)))
     def reorg_test(self):
         # Node 1 will mine a 300 block chain starting 287 blocks back from Node
@@ -155,12 +155,12 @@
             "-disablesafemode", "-noparkdeepreorg", "-maxreorgdepth=-1"])
         height = self.nodes[1].getblockcount()
-        self.log.info("Current block height: %d" % height)
+        self.log.info("Current block height: {}".format(height))
         invalidheight = height - 287
         badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block %s at height %d" %
-                      (badhash, invalidheight))
+        self.log.info("Invalidating block {} at height {}".format(
+            badhash, invalidheight))
         self.nodes[1].invalidateblock(badhash)
         # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want.
@@ -173,7 +173,8 @@
         curhash = self.nodes[1].getblockhash(invalidheight - 1)
         assert(self.nodes[1].getblockcount() == invalidheight - 1)
-        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
+        self.log.info("New best height: {}".format(
+            self.nodes[1].getblockcount()))
         # Reboot node1 to clear those giant tx's from mempool
         self.stop_node(1)
@@ -189,10 +190,10 @@
         connect_nodes(self.nodes[2], self.nodes[1])
         sync_blocks(self.nodes[0:3], timeout=120)
-        self.log.info("Verify height on node 2: %d" %
-                      self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high bc of stale blocks in block files: %d" %
-                      calc_usage(self.prunedir))
+        self.log.info("Verify height on node 2: {}".format(
+            self.nodes[2].getblockcount()))
+        self.log.info("Usage possibly still high bc of stale blocks in block files: {}".format(
+            calc_usage(self.prunedir)))
         self.log.info(
             "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
@@ -209,7 +210,7 @@
         sync_blocks(self.nodes[0:3], timeout=300)
         usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target: %d" % usage)
+        self.log.info("Usage should be below target: {}".format(usage))
         if (usage > 550):
             raise AssertionError("Pruning target not being met")
@@ -219,7 +220,8 @@
         # Verify that a block on the old main chain fork has been pruned away
         assert_raises_rpc_error(
             -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
-        self.log.info("Will need to redownload block %d" % self.forkheight)
+        self.log.info(
+            "Will need to redownload block {}".format(self.forkheight))
         # Verify that we have enough history to reorg back to the fork point.
         # Although this is more than 288 blocks, because this chain was written
@@ -246,7 +248,8 @@
         if self.nodes[2].getblockcount() < self.mainchainheight:
             blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
             self.log.info(
-                "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
+                "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {}".format(
+                    blocks_to_mine))
             self.nodes[0].invalidateblock(curchainhash)
             assert(self.nodes[0].getblockcount() == self.mainchainheight)
             assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)