D13209.diff

diff --git a/test/functional/abc-cmdline.py b/test/functional/abc-cmdline.py
--- a/test/functional/abc-cmdline.py
+++ b/test/functional/abc-cmdline.py
@@ -82,16 +82,17 @@
def excessiveblocksize_test(self):
self.log.info("Testing -excessiveblocksize")
- self.log.info(" Set to twice the default, i.e. {} bytes".format(
- 2 * LEGACY_MAX_BLOCK_SIZE))
+ self.log.info(
+ f" Set to twice the default, i.e. {2 * LEGACY_MAX_BLOCK_SIZE} bytes")
self.stop_node(0)
self.start_node(0, [f"-excessiveblocksize={2 * LEGACY_MAX_BLOCK_SIZE}"])
self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE)
# Check for EB correctness in the subver string
self.check_subversion(r"/Bitcoin ABC:.*\(EB2\.0; .*\)/")
- self.log.info(" Attempt to set below legacy limit of 1MB - try {} bytes".format(
- LEGACY_MAX_BLOCK_SIZE))
+ self.log.info(
+ " Attempt to set below legacy limit of 1MB - try "
+ f"{LEGACY_MAX_BLOCK_SIZE} bytes")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(
[f"-excessiveblocksize={LEGACY_MAX_BLOCK_SIZE}"],
diff --git a/test/functional/abc-replay-protection.py b/test/functional/abc-replay-protection.py
--- a/test/functional/abc-replay-protection.py
+++ b/test/functional/abc-replay-protection.py
@@ -54,8 +54,7 @@
self.tip = None
self.blocks = {}
self.extra_args = [['-whitelist=noban@127.0.0.1',
- "-replayprotectionactivationtime={}".format(
- REPLAY_PROTECTION_START_TIME),
+ f"-replayprotectionactivationtime={REPLAY_PROTECTION_START_TIME}",
"-acceptnonstdtxn=1"]]
def next_block(self, number):
diff --git a/test/functional/abc_rpc_excessiveblock.py b/test/functional/abc_rpc_excessiveblock.py
--- a/test/functional/abc_rpc_excessiveblock.py
+++ b/test/functional/abc_rpc_excessiveblock.py
@@ -15,8 +15,9 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
-BLOCKSIZE_TOO_LOW = "Error: Excessive block size must be > {:,} bytes".format(
- LEGACY_MAX_BLOCK_SIZE)
+BLOCKSIZE_TOO_LOW = (
+ f"Error: Excessive block size must be > {LEGACY_MAX_BLOCK_SIZE:,} bytes"
+)
class ExcessiveBlockSizeRPCTest(BitcoinTestFramework):
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -188,17 +188,16 @@
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
- print("File {} could not be opened. Continuing without it.".format(
- logfile), file=sys.stderr)
+ print(f"File {logfile} could not be opened. Continuing without it.",
+ file=sys.stderr)
def print_logs_plain(log_events, colors):
"""Renders the iterator of log events into text."""
for event in log_events:
lines = event.event.splitlines()
- print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()],
- event.source, lines[0],
- colors["reset"]))
+ print(f"{colors[event.source.rstrip()]} {event.source: <5} "
+ f"{lines[0]} {colors['reset']}")
if len(lines) > 1:
for line in lines[1:]:
print(f"{colors[event.source.rstrip()]}{line}{colors['reset']}")
diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py
--- a/test/functional/feature_minchainwork.py
+++ b/test/functional/feature_minchainwork.py
@@ -59,8 +59,9 @@
num_blocks_to_generate,
sync_fun=self.no_op)
- self.log.info("Node0 current chain work: {}".format(
- self.nodes[0].getblockheader(hashes[-1])['chainwork']))
+ self.log.info(
+ "Node0 current chain work: "
+ f"{self.nodes[0].getblockheader(hashes[-1])['chainwork']}")
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -42,9 +42,9 @@
os.mkdir(self.walletnotify_dir)
# -alertnotify and -blocknotify on node0, walletnotify on node1
- self.extra_args = [["-alertnotify=echo > {}".format(
- os.path.join(self.alertnotify_dir, '%s')),
- f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}"],
+ self.extra_args = [
+ [f"-alertnotify=echo > {os.path.join(self.alertnotify_dir, '%s')}",
+ f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}"],
["-rescan",
f"-walletnotify=echo > {os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))}"]]
self.wallet_names = [self.default_wallet_name, self.wallet]
diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py
--- a/test/functional/feature_proxy.py
+++ b/test/functional/feature_proxy.py
@@ -102,20 +102,21 @@
# This is because the proxy to use is based on CService.GetNetwork(),
# which returns NET_UNROUTABLE for localhost.
args = [
- ['-listen', '-proxy={}:{}'.format(
- self.conf1.addr[0], self.conf1.addr[1]), '-proxyrandomize=1'],
+ ['-listen', f'-proxy={self.conf1.addr[0]}:{self.conf1.addr[1]}',
+ '-proxyrandomize=1'],
['-listen', '-proxy={}:{}'.format(*self.conf1.addr),
'-onion={}:{}'.format(*self.conf2.addr),
'-i2psam={}:{}'.format(*self.i2p_sam), '-i2pacceptincoming=0',
'-proxyrandomize=0'],
- ['-listen', '-proxy={}:{}'.format(
- self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=1'],
+ ['-listen', f'-proxy={self.conf2.addr[0]}:{self.conf2.addr[1]}',
+ '-proxyrandomize=1'],
[]
]
if self.have_ipv6:
- args[3] = ['-listen', '-proxy=[{}]:{}'.format(
- self.conf3.addr[0], self.conf3.addr[1]), '-proxyrandomize=0',
- '-noonion']
+ args[3] = [
+ '-listen',
+ f'-proxy=[{self.conf3.addr[0]}]:{self.conf3.addr[1]}',
+ '-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -136,8 +136,8 @@
assert os.path.isfile(os.path.join(
self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
self.log.info("Success")
- self.log.info("Though we're already using more than 550MiB, current usage: {}".format(
- calc_usage(self.prunedir)))
+ self.log.info("Though we're already using more than 550MiB, current usage: "
+ f"{calc_usage(self.prunedir)}")
self.log.info(
"Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full
@@ -178,8 +178,8 @@
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
- self.log.info("Usage can be over target because of high stale rate: {}".format(
- calc_usage(self.prunedir)))
+ self.log.info("Usage can be over target because of high stale rate: "
+ f"{calc_usage(self.prunedir)}")
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node
@@ -219,8 +219,8 @@
self.sync_blocks(self.nodes[0:3], timeout=120)
self.log.info(f"Verify height on node 2: {self.nodes[2].getblockcount()}")
- self.log.info("Usage possibly still high because of stale blocks in block files: {}".format(
- calc_usage(self.prunedir)))
+ self.log.info("Usage possibly still high because of stale blocks in block "
+ f"files: {calc_usage(self.prunedir)}")
self.log.info(
"Mine 220 more large blocks so we have requisite history")
@@ -266,8 +266,8 @@
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info(
- "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {}".format(
- blocks_to_mine))
+ "Rewind node 0 to prev main chain to mine longer chain to trigger "
+ f"redownload. Blocks needed: {blocks_to_mine}")
self.nodes[0].invalidateblock(curchainhash)
assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py
--- a/test/functional/interface_bitcoin_cli.py
+++ b/test/functional/interface_bitcoin_cli.py
@@ -317,8 +317,10 @@
self.log.info("Test -version with node stopped")
self.stop_node(0)
cli_response = self.nodes[0].cli().send_cli('-version')
- assert "{} RPC client version".format(
- self.config['environment']['PACKAGE_NAME']) in cli_response
+ assert (
+ f"{self.config['environment']['PACKAGE_NAME']} RPC client version"
+ in cli_response
+ )
self.log.info(
"Test -rpcwait option successfully waits for RPC connection")
diff --git a/test/functional/interface_usdt_net.py b/test/functional/interface_usdt_net.py
--- a/test/functional/interface_usdt_net.py
+++ b/test/functional/interface_usdt_net.py
@@ -28,34 +28,28 @@
# larger messages see contrib/tracing/log_raw_p2p_msgs.py
MAX_MSG_DATA_LENGTH = 150
-net_tracepoints_program = """
+net_tracepoints_program = f"""
#include <uapi/linux/ptrace.h>
-#define MAX_PEER_ADDR_LENGTH {}
-#define MAX_PEER_CONN_TYPE_LENGTH {}
-#define MAX_MSG_TYPE_LENGTH {}
-#define MAX_MSG_DATA_LENGTH {}
-""".format(
- MAX_PEER_ADDR_LENGTH,
- MAX_PEER_CONN_TYPE_LENGTH,
- MAX_MSG_TYPE_LENGTH,
- MAX_MSG_DATA_LENGTH
-) + """
-#define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; })
+#define MAX_PEER_ADDR_LENGTH {MAX_PEER_ADDR_LENGTH}
+#define MAX_PEER_CONN_TYPE_LENGTH {MAX_PEER_CONN_TYPE_LENGTH}
+#define MAX_MSG_TYPE_LENGTH {MAX_MSG_TYPE_LENGTH}
+#define MAX_MSG_DATA_LENGTH {MAX_MSG_DATA_LENGTH}
+#define MIN(a,b) ({{ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; }})
struct p2p_message
-{
+{{
u64 peer_id;
char peer_addr[MAX_PEER_ADDR_LENGTH];
char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH];
char msg_type[MAX_MSG_TYPE_LENGTH];
u64 msg_size;
u8 msg[MAX_MSG_DATA_LENGTH];
-};
+}};
BPF_PERF_OUTPUT(inbound_messages);
-int trace_inbound_message(struct pt_regs *ctx) {
- struct p2p_message msg = {};
+int trace_inbound_message(struct pt_regs *ctx) {{
+ struct p2p_message msg = {{}};
bpf_usdt_readarg(1, ctx, &msg.peer_id);
bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH);
bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
@@ -64,11 +58,11 @@
bpf_usdt_readarg_p(6, ctx, &msg.msg, MIN(msg.msg_size, MAX_MSG_DATA_LENGTH));
inbound_messages.perf_submit(ctx, &msg, sizeof(msg));
return 0;
-}
+}}
BPF_PERF_OUTPUT(outbound_messages);
-int trace_outbound_message(struct pt_regs *ctx) {
- struct p2p_message msg = {};
+int trace_outbound_message(struct pt_regs *ctx) {{
+ struct p2p_message msg = {{}};
bpf_usdt_readarg(1, ctx, &msg.peer_id);
bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH);
bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
@@ -77,7 +71,7 @@
bpf_usdt_readarg_p(6, ctx, &msg.msg, MIN(msg.msg_size, MAX_MSG_DATA_LENGTH));
outbound_messages.perf_submit(ctx, &msg, sizeof(msg));
return 0;
-};
+}};
"""
diff --git a/test/functional/interface_usdt_validation.py b/test/functional/interface_usdt_validation.py
--- a/test/functional/interface_usdt_validation.py
+++ b/test/functional/interface_usdt_validation.py
@@ -77,8 +77,12 @@
]
def __repr__(self):
- return "ConnectedBlock(hash={} height={}, transactions={}, inputs={}, sigchk={}, duration={})".format(
- bytes(self.hash[::-1]).hex(), self.height, self.transactions, self.inputs, self.sigchk, self.duration)
+ return (
+ f"ConnectedBlock(hash={bytes(self.hash[::-1]).hex()} "
+ f"height={self.height}, transactions={self.transactions}, "
+ f"inputs={self.inputs}, sigchk={self.sigchk}, "
+ f"duration={self.duration})"
+ )
# The handle_* function is a ctypes callback function called from C. When
# we assert in the handle_* function, the AssertError doesn't propagate
diff --git a/test/functional/mempool_expiry.py b/test/functional/mempool_expiry.py
--- a/test/functional/mempool_expiry.py
+++ b/test/functional/mempool_expiry.py
@@ -60,8 +60,9 @@
assert_equal(
parent_txid,
node.getmempoolentry(child_txid)['depends'][0])
- self.log.info('Broadcast child transaction after {} hours.'.format(
- timedelta(seconds=(half_expiry_time - entry_time))))
+ self.log.info(
+ 'Broadcast child transaction after '
+ f'{timedelta(seconds=half_expiry_time - entry_time)} hours.')
# Broadcast another (independent) transaction.
independent_txid = self.wallet.send_self_transfer(
@@ -75,8 +76,9 @@
# is only checked when a new transaction is added to the mempool.
self.wallet.send_self_transfer(
from_node=node, utxo_to_spend=trigger_utxo1)
- self.log.info('Test parent tx not expired after {} hours.'.format(
- timedelta(seconds=(nearly_expiry_time - entry_time))))
+ self.log.info(
+ 'Test parent tx not expired after '
+ f'{timedelta(seconds=nearly_expiry_time - entry_time)} hours.')
assert_equal(entry_time, node.getmempoolentry(parent_txid)['time'])
# Transaction should be evicted from the mempool after the expiry time
@@ -87,8 +89,9 @@
# mempool is checked.
self.wallet.send_self_transfer(
from_node=node, utxo_to_spend=trigger_utxo2)
- self.log.info('Test parent tx expiry after {} hours.'.format(
- timedelta(seconds=(expiry_time - entry_time))))
+ self.log.info(
+ 'Test parent tx expiry after '
+ f'{timedelta(seconds=expiry_time - entry_time)} hours.')
assert_raises_rpc_error(-5, 'Transaction not in mempool',
node.getmempoolentry, parent_txid)
@@ -98,19 +101,21 @@
node.getmempoolentry, child_txid)
# Check that the independent tx is still in the mempool.
- self.log.info('Test the independent tx not expired after {} hours.'.format(
- timedelta(seconds=(expiry_time - half_expiry_time))))
+ self.log.info(
+ 'Test the independent tx not expired after '
+ f'{timedelta(seconds=expiry_time - half_expiry_time)} hours.')
assert_equal(
half_expiry_time,
node.getmempoolentry(independent_txid)['time'])
def run_test(self):
- self.log.info('Test default mempool expiry timeout of {} hours.'.format(
- DEFAULT_MEMPOOL_EXPIRY))
+ self.log.info(
+ 'Test default mempool expiry timeout of '
+ f'{DEFAULT_MEMPOOL_EXPIRY} hours.')
self.test_transaction_expiry(DEFAULT_MEMPOOL_EXPIRY)
- self.log.info('Test custom mempool expiry timeout of {} hours.'.format(
- CUSTOM_MEMPOOL_EXPIRY))
+ self.log.info(
+ f'Test custom mempool expiry timeout of {CUSTOM_MEMPOOL_EXPIRY} hours.')
self.restart_node(
0, [f'-mempoolexpiry={CUSTOM_MEMPOOL_EXPIRY}'])
self.test_transaction_expiry(CUSTOM_MEMPOOL_EXPIRY)
diff --git a/test/functional/mempool_unbroadcast.py b/test/functional/mempool_unbroadcast.py
--- a/test/functional/mempool_unbroadcast.py
+++ b/test/functional/mempool_unbroadcast.py
@@ -117,8 +117,10 @@
# check transaction was removed from unbroadcast set due to presence in
# a block
- removal_reason = "Removed {} from set of unbroadcast txns before " \
- "confirmation that txn was sent out".format(txhsh)
+ removal_reason = (
+ f"Removed {txhsh} from set of unbroadcast txns before confirmation that "
+ "txn was sent out"
+ )
with node.assert_debug_log([removal_reason]):
self.generate(node, 1, sync_fun=self.no_op)
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -118,8 +118,9 @@
if tx_count in n_tx_to_mine:
# The created transactions are mined into blocks by batches.
- self.log.info('The batch of {} transactions has been accepted'
- ' into the mempool.'.format(len(self.nodes[0].getrawmempool())))
+ self.log.info(
+ f'The batch of {len(self.nodes[0].getrawmempool())} transactions '
+ 'has been accepted into the mempool.')
block_hash = self.generate(self.nodes[0], 1)[0]
if not first_block_hash:
first_block_hash = block_hash
@@ -131,15 +132,16 @@
# At the end all of the mined blocks are invalidated, and all of the created
# transactions should be re-added from disconnected blocks to
# the mempool.
- self.log.info('The last batch of {} transactions has been'
- ' accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
+ self.log.info(
+ f'The last batch of {len(self.nodes[0].getrawmempool())} '
+ 'transactions has been accepted into the mempool.')
start = time.time()
self.nodes[0].invalidateblock(first_block_hash)
end = time.time()
assert_equal(len(self.nodes[0].getrawmempool()), size)
self.log.info(
- 'All of the recently mined transactions have been re-added'
- ' into the mempool in {} seconds.'.format(end - start))
+ 'All of the recently mined transactions have been re-added into '
+ f'the mempool in {end - start} seconds.')
self.log.info(
'Checking descendants/ancestors properties of all of the'
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -200,8 +200,10 @@
with p2p_lock:
assert predicate(peer), (
- "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
- block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
+ f"block_hash={block_hash!r}, "
+ f"cmpctblock={peer.last_message.get('cmpctblock', None)!r}, "
+ f"inv={peer.last_message.get('inv', None)!r}"
+ )
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(
diff --git a/test/functional/p2p_inv_download.py b/test/functional/p2p_inv_download.py
--- a/test/functional/p2p_inv_download.py
+++ b/test/functional/p2p_inv_download.py
@@ -176,7 +176,8 @@
txid = int(ctx.rehash(), 16)
self.log.info(
- f"Announce the transaction to all nodes from all {NUM_INBOUND} incoming peers, but never send it")
+ f"Announce the transaction to all nodes from all {NUM_INBOUND} incoming "
+ "peers, but never send it")
msg = msg_inv([CInv(t=context.inv_type, h=txid)])
for p in self.peers:
p.send_and_ping(msg)
@@ -189,7 +190,8 @@
# In order to make sure the inv is sent we move the time 2 minutes
# forward, which has the added side effect that the tx can be
# unconditionally requested.
- with self.nodes[1].assert_debug_log([f"got inv: tx {uint256_hex(txid)} new peer=0"]):
+ with self.nodes[1].assert_debug_log(
+ [f"got inv: tx {uint256_hex(txid)} new peer=0"]):
self.nodes[0].setmocktime(
int(time.time()) + UNCONDITIONAL_RELAY_DELAY)
@@ -214,8 +216,9 @@
max_inbound_delay = context.constants.inbound_peer_delay + \
context.constants.overloaded_peer_delay
- self.log.info("Test that we don't load peers with more than {} getdata requests immediately".format(
- max_getdata_in_flight))
+ self.log.info(
+ f"Test that we don't load peers with more than {max_getdata_in_flight} "
+ "getdata requests immediately")
invids = [i for i in range(max_getdata_in_flight + 2)]
p = self.nodes[0].p2ps[0]
@@ -235,9 +238,8 @@
p.send_message(msg_inv([CInv(t=context.inv_type, h=invids[i])]))
p.sync_with_ping()
self.log.info(
- "No more than {} requests should be seen within {} seconds after announcement".format(
- max_getdata_in_flight,
- max_inbound_delay - 1))
+ f"No more than {max_getdata_in_flight} requests should be seen within "
+ f"{max_inbound_delay - 1} seconds after announcement")
self.nodes[0].setmocktime(
mock_time +
max_inbound_delay - 1)
@@ -245,8 +247,8 @@
with p2p_lock:
assert_equal(p.getdata_count, max_getdata_in_flight)
self.log.info(
- "If we wait {} seconds after announcement, we should eventually get more requests".format(
- max_inbound_delay))
+ f"If we wait {max_inbound_delay} seconds after announcement, we should "
+ f"eventually get more requests")
self.nodes[0].setmocktime(
mock_time +
max_inbound_delay)
diff --git a/test/functional/p2p_invalid_locator.py b/test/functional/p2p_invalid_locator.py
--- a/test/functional/p2p_invalid_locator.py
+++ b/test/functional/p2p_invalid_locator.py
@@ -24,8 +24,9 @@
self.log.info('Test max locator size')
block_count = node.getblockcount()
for msg in [msg_getheaders(), msg_getblocks()]:
- self.log.info('Wait for disconnect when sending {} hashes in locator'.format(
- MAX_LOCATOR_SZ + 1))
+ self.log.info(
+ f'Wait for disconnect when sending {MAX_LOCATOR_SZ + 1} hashes in '
+ 'locator')
exceed_max_peer = node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(
i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)]
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -183,8 +183,8 @@
[tx],
self.nodes[1],
success=False,
- reject_reason='Not relaying non-mempool transaction '
- '{} from forcerelay peer=0'.format(txid),
+ reject_reason=f'Not relaying non-mempool transaction {txid} from '
+ 'forcerelay peer=0',
)
def checkpermission(self, args, expectedPermissions):
diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py
--- a/test/functional/rpc_bind.py
+++ b/test/functional/rpc_bind.py
@@ -83,12 +83,12 @@
at a non-localhost IP.
'''
self.log.info(f"Allow IP test for {rpchost}:{rpcport}")
- node_args = \
- ['-disablewallet', '-nolisten'] + \
- [f"-rpcallowip={x}" for x in allow_ips] + \
- [f"-rpcbind={addr}" for addr in ['127.0.0.1',
- "{}:{}".format(rpchost,
- rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
+ node_args = (
+ ['-disablewallet', '-nolisten'] +
+ [f"-rpcallowip={x}" for x in allow_ips] +
+ # Bind to localhost as well so start_nodes doesn't hang
+ [f"-rpcbind={addr}" for addr in ['127.0.0.1', f"{rpchost}:{rpcport}"]]
+ )
self.nodes[0].host = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py
--- a/test/functional/rpc_createmultisig.py
+++ b/test/functional/rpc_createmultisig.py
@@ -133,9 +133,8 @@
path = os.path.join(self.options.tmpdir, "node1", "regtest",
"wallets", "wmulti")
if e.error['code'] == -18 and (
- "Wallet file verification failed. Failed to load "
- "database path '{}'. Path does not exist.".format(path)
- in e.error['message']):
+ "Wallet file verification failed. Failed to load database "
+ f"path '{path}'. Path does not exist." in e.error['message']):
node1.createwallet(wallet_name='wmulti',
disable_private_keys=True)
else:
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -138,10 +138,12 @@
self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range",
self.nodes[0].createrawtransaction, [], {address: -1})
- assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format(
- address), self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
- assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format(
- address), self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
+ assert_raises_rpc_error(-8, f"Invalid parameter, duplicated address: {address}",
+ self.nodes[0].createrawtransaction, [],
+ multidict([(address, 1), (address, 1)]))
+ assert_raises_rpc_error(-8, f"Invalid parameter, duplicated address: {address}",
+ self.nodes[0].createrawtransaction, [],
+ [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8,
"Invalid parameter, duplicate key: data",
self.nodes[0].createrawtransaction,
diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py
--- a/test/functional/wallet_address_types.py
+++ b/test/functional/wallet_address_types.py
@@ -129,8 +129,9 @@
descsum_create(f"pkh({key_descs[info['pubkey']]})"))
elif typ == 'legacy':
# P2SH-multisig
- assert_equal(info['desc'], descsum_create("sh(multi(2,{},{}))".format(
- key_descs[info['pubkeys'][0]], key_descs[info['pubkeys'][1]])))
+ assert_equal(info['desc'],
+ descsum_create(f"sh(multi(2,{key_descs[info['pubkeys'][0]]},"
+ f"{key_descs[info['pubkeys'][1]]}))"))
else:
# Unknown type
assert False
diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py
--- a/test/functional/wallet_dump.py
+++ b/test/functional/wallet_dump.py
@@ -125,9 +125,9 @@
dump_time,
tz=datetime.timezone.utc,
).replace(tzinfo=None).isoformat())
- dump_best_block_1 = '# * Best block at time of backup was {} ({}),'.format(
- self.nodes[0].getblockcount(),
- self.nodes[0].getbestblockhash(),
+ dump_best_block_1 = (
+ f'# * Best block at time of backup was {self.nodes[0].getblockcount()} '
+ f'({self.nodes[0].getbestblockhash()}),'
)
dump_best_block_2 = '# mined on {}Z'.format(
datetime.datetime.fromtimestamp(
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -314,8 +314,8 @@
"wallets", "wallets")
assert_raises_rpc_error(
-18,
- "Wallet file verification failed. Failed to load database path "
- "'{}'. Path does not exist.".format(path),
+ f"Wallet file verification failed. Failed to load database path '{path}'. "
+ "Path does not exist.",
self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
@@ -328,8 +328,8 @@
self.wallet_data_filename)
assert_raises_rpc_error(
-4,
- "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(
- path),
+ "Wallet file verification failed. Refusing to load database. "
+ f"Data file '{path}' is already loaded.",
self.nodes[0].loadwallet,
wallet_names[0])
@@ -343,8 +343,8 @@
self.wallet_data_filename)
assert_raises_rpc_error(
-4,
- "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(
- path),
+ "Wallet file verification failed. Refusing to load database. "
+ f"Data file '{path}' is already loaded.",
self.nodes[0].loadwallet,
self.wallet_data_filename)
@@ -373,8 +373,8 @@
"wallets", "empty_wallet_dir")
assert_raises_rpc_error(
-18,
- "Wallet file verification failed. Failed to load database "
- "path '{}'. Data is not in recognized format.".format(path),
+ f"Wallet file verification failed. Failed to load database path '{path}'. "
+ "Data is not in recognized format.",
self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
