F12428762: D5217.diff
Size: 141 KB
diff --git a/.arclint b/.arclint
--- a/.arclint
+++ b/.arclint
@@ -17,7 +17,8 @@
"version": ">=1.3.4",
"include": "(\\.py$)",
"flags": [
- "--global-config=.autopep8"
+ "--select=E,W",
+ "--aggressive"
]
},
"flake8": {
diff --git a/.autopep8 b/.autopep8
deleted file mode 100644
--- a/.autopep8
+++ /dev/null
@@ -1,2 +0,0 @@
-[pycodestyle]
-select = E,W
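The pair of hunks above deletes the `.autopep8` config file and instead passes the equivalent settings (`--select=E,W`, now joined by `--aggressive`) directly as flags in `.arclint`. A minimal sketch of what those two options do, using autopep8's Python API; the sample source string and the printed result are illustrative assumptions, not part of this diff:

    import autopep8

    # Same settings .arclint now passes as flags: restrict fixes to
    # pycodestyle E/W codes and enable level-1 aggressive fixes
    # (the aggressive level is what produces the line re-wrapping
    # seen throughout the rest of this diff).
    source = "x=1;y=2\n"
    fixed = autopep8.fix_code(
        source,
        options={"select": ["E", "W"], "aggressive": 1},
    )
    print(fixed)  # expected roughly: "x = 1\ny = 2\n"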
diff --git a/contrib/devtools/chainparams/test_make_chainparams.py b/contrib/devtools/chainparams/test_make_chainparams.py
--- a/contrib/devtools/chainparams/test_make_chainparams.py
+++ b/contrib/devtools/chainparams/test_make_chainparams.py
@@ -9,7 +9,8 @@
class MockRPC:
- def __init__(self, test, chain, numBlocks, expectedBlock, blockHash, chainWork):
+ def __init__(self, test, chain, numBlocks,
+ expectedBlock, blockHash, chainWork):
self.test = test
self.chain = chain
self.numBlocks = numBlocks
@@ -69,7 +70,8 @@
class MockFailRPC(MockRPC):
# Provides a fail counter to fail after the Nth RPC command
- def __init__(self, test, chain, numBlocks, expectedBlock, blockHash, chainWork, failCounter):
+ def __init__(self, test, chain, numBlocks, expectedBlock,
+ blockHash, chainWork, failCounter):
super().__init__(test, chain, numBlocks, expectedBlock, blockHash, chainWork)
self.failCounter = failCounter
diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py
--- a/contrib/devtools/circular-dependencies.py
+++ b/contrib/devtools/circular-dependencies.py
@@ -66,8 +66,10 @@
closure[dep] = closure[src] + [src]
if len(closure) == old_size:
break
- # If module is in its own transitive closure, it's a circular dependency; check if it is the shortest
- if module in closure and (shortest_cycle is None or len(closure[module]) + 1 < len(shortest_cycle)):
+ # If module is in its own transitive closure, it's a circular
+ # dependency; check if it is the shortest
+ if module in closure and (shortest_cycle is None or len(
+ closure[module]) + 1 < len(shortest_cycle)):
shortest_cycle = [module] + closure[module]
if shortest_cycle is None:
break
diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py
--- a/contrib/devtools/copyright_header.py
+++ b/contrib/devtools/copyright_header.py
@@ -11,9 +11,9 @@
import datetime
import os
-################################################################################
+##########################################################################
# file filtering
-################################################################################
+##########################################################################
EXCLUDE = [
# libsecp256k1:
@@ -46,12 +46,12 @@
def applies_to_file(filename):
- return ((EXCLUDE_COMPILED.match(filename) is None) and
- (INCLUDE_COMPILED.match(filename) is not None))
+ return ((EXCLUDE_COMPILED.match(filename) is None)
+ and (INCLUDE_COMPILED.match(filename) is not None))
-################################################################################
+##########################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
-################################################################################
+##########################################################################
GIT_LS_CMD = 'git ls-files'
@@ -67,9 +67,9 @@
return sorted([filename for filename in filenames if
applies_to_file(filename)])
-################################################################################
+##########################################################################
# define and compile regexes for the patterns we are looking for
-################################################################################
+##########################################################################
COPYRIGHT_WITH_C = r'Copyright \(c\)'
@@ -131,9 +131,9 @@
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
-################################################################################
+##########################################################################
# search file contents for copyright message of particular category
-################################################################################
+##########################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
@@ -154,9 +154,9 @@
match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
return match is not None
-################################################################################
+##########################################################################
# get file info
-################################################################################
+##########################################################################
def read_file(filename):
@@ -189,9 +189,9 @@
info['classified_copyrights'] = info['classified_copyrights'] + 1
return info
-################################################################################
+##########################################################################
# report execution
-################################################################################
+##########################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
@@ -287,9 +287,9 @@
print_report(file_infos, verbose)
os.chdir(original_cwd)
-################################################################################
+##########################################################################
# report cmd
-################################################################################
+##########################################################################
REPORT_USAGE = """
@@ -322,9 +322,9 @@
exec_report(base_directory, verbose)
-################################################################################
+##########################################################################
# query git for year of last change
-################################################################################
+##########################################################################
GIT_LOG_CMD = "git log --pretty=format:%ai {}"
@@ -346,9 +346,9 @@
def get_most_recent_git_change_year(filename):
return max(get_git_change_years(filename))
-################################################################################
+##########################################################################
# read and write to file
-################################################################################
+##########################################################################
def read_file_lines(filename):
@@ -363,9 +363,9 @@
f.write(''.join(file_lines))
f.close()
-################################################################################
+##########################################################################
# update header years execution
-################################################################################
+##########################################################################
COPYRIGHT = r'Copyright \(c\)'
@@ -424,9 +424,9 @@
start_year, end_year = parse_year_range(year_range)
if end_year == last_git_change_year:
return line
- return (before_copyright + copyright_splitter +
- year_range_to_str(start_year, last_git_change_year) + ' ' +
- ' '.join(space_split[1:]))
+ return (before_copyright + copyright_splitter
+ + year_range_to_str(start_year, last_git_change_year) + ' '
+ + ' '.join(space_split[1:]))
def update_updatable_copyright(filename):
@@ -454,9 +454,9 @@
update_updatable_copyright(filename)
os.chdir(original_cwd)
-################################################################################
+##########################################################################
# update cmd
-################################################################################
+##########################################################################
UPDATE_USAGE = """
@@ -502,9 +502,9 @@
sys.exit("*** bad base_directory: {}".format(base_directory))
exec_update_header_year(base_directory)
-################################################################################
+##########################################################################
# inserted copyright header format
-################################################################################
+##########################################################################
def get_header_lines(header, start_year, end_year):
@@ -534,27 +534,27 @@
def get_python_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
-################################################################################
+##########################################################################
# query git for year of last change
-################################################################################
+##########################################################################
def get_git_change_year_range(filename):
years = get_git_change_years(filename)
return min(years), max(years)
-################################################################################
+##########################################################################
# check for existing ABC copyright
-################################################################################
+##########################################################################
def file_already_has_bitcoin_copyright(file_lines):
index, _ = get_updatable_copyright_line(file_lines)
return index is not None
-################################################################################
+##########################################################################
# insert header execution
-################################################################################
+##########################################################################
def file_has_hashbang(file_lines):
@@ -602,9 +602,9 @@
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
-################################################################################
+##########################################################################
# insert cmd
-################################################################################
+##########################################################################
INSERT_USAGE = """
@@ -650,9 +650,9 @@
style = 'cpp'
exec_insert_header(filename, style)
-################################################################################
+##########################################################################
# UI
-################################################################################
+##########################################################################
USAGE = """
diff --git a/contrib/devtools/optimize-pngs.py b/contrib/devtools/optimize-pngs.py
--- a/contrib/devtools/optimize-pngs.py
+++ b/contrib/devtools/optimize-pngs.py
@@ -56,7 +56,8 @@
sys.exit(0)
# verify
- if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT, universal_newlines=True):
+ if "Not a PNG file" in subprocess.check_output(
+ [pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT, universal_newlines=True):
print("PNG file " + file +
" is corrupted after crushing, check out pngcursh version")
sys.exit(1)
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -105,7 +105,8 @@
raise IOError('Error opening file')
for line in stdout.splitlines():
tokens = line.split()
- if len(tokens) > 1 and tokens[1] == '(BIND_NOW)' or (len(tokens) > 2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2]):
+ if len(tokens) > 1 and tokens[1] == '(BIND_NOW)' or (
+ len(tokens) > 2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
@@ -177,7 +178,8 @@
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch, bits) = get_PE_dll_characteristics(executable)
- return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ return (
+ bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
diff --git a/contrib/devtools/update-translations.py b/contrib/devtools/update-translations.py
--- a/contrib/devtools/update-translations.py
+++ b/contrib/devtools/update-translations.py
@@ -35,7 +35,9 @@
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
- print('Execute this script at the root of the repository', file=sys.stderr)
+ print(
+ 'Execute this script at the root of the repository',
+ file=sys.stderr)
sys.exit(1)
@@ -76,7 +78,8 @@
if numeric:
other = []
- # numeric (Qt) can be present in any order, others (strprintf) must be in specified order
+ # numeric (Qt) can be present in any order, others (strprintf) must be in
+ # specified order
return set(numeric), other
@@ -99,8 +102,10 @@
return False
else:
if source_f != translation_f:
- if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
- # Allow numerus translations to omit %n specifier (usually when it only has one possible value)
+ if numerus and source_f == (set(), ['n']) and translation_f == (
+ set(), []) and translation.find('%') == -1:
+ # Allow numerus translations to omit %n specifier (usually when
+ # it only has one possible value)
return True
errors.append("Mismatch between '{}' and '{}'".format(
sanitize_string(source), sanitize_string(translation)))
@@ -111,7 +116,8 @@
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
- if not filename.endswith('.ts' + suffix) or filename == SOURCE_LANG + suffix:
+ if not filename.endswith(
+ '.ts' + suffix) or filename == SOURCE_LANG + suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
@@ -153,11 +159,13 @@
have_errors = False
for (filename, filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
- # need to override encoding because 'utf8' is not understood only 'utf-8'
+ # need to override encoding because 'utf8' is not understood only
+ # 'utf-8'
parser = ET.XMLParser(encoding='utf-8')
with open(filepath + '.orig', 'rb') as f:
data = f.read()
- # remove control characters; this must be done over the entire file otherwise the XML parser will fail
+ # remove control characters; this must be done over the entire file
+ # otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
@@ -209,7 +217,8 @@
continue
# write fixed-up tree
- # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
+ # if diff reduction requested, replace some XML to 'sanitize' to qt
+ # formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py
--- a/contrib/gitian-build.py
+++ b/contrib/gitian-build.py
@@ -249,7 +249,8 @@
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
- # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker
+ # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know
+ # that we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
@@ -260,7 +261,8 @@
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
- if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
+ if args.macos and not os.path.isfile(
+ 'gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py
--- a/contrib/linearize/linearize-data.py
+++ b/contrib/linearize/linearize-data.py
@@ -145,7 +145,8 @@
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
- if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
+ if not self.fileOutput and (
+ (self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
@@ -203,7 +204,8 @@
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
- # If the data is cached, use it from memory and remove from the cache
+ # If the data is cached, use it from memory and remove from the
+ # cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
@@ -243,7 +245,8 @@
self.hash_str = calc_hash_str(blk_hdr)
if self.hash_str not in blkmap:
# Because blocks can be written to files out-of-order as of 0.10, the script
- # may encounter blocks it doesn't know about. Treat as debug output.
+ # may encounter blocks it doesn't know about. Treat as debug
+ # output.
if settings['debug_output'] == 'true':
print("Skipping unknown block " + self.hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
diff --git a/contrib/linearize/linearize-hashes.py b/contrib/linearize/linearize-hashes.py
--- a/contrib/linearize/linearize-hashes.py
+++ b/contrib/linearize/linearize-hashes.py
@@ -76,8 +76,8 @@
height = settings['min_height']
while height < settings['max_height'] + 1:
- num_blocks = min(settings['max_height']
- + 1 - height, max_blocks_per_call)
+ num_blocks = min(settings['max_height'] +
+ 1 - height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
@@ -154,7 +154,8 @@
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
- # Force hash byte format setting to be lowercase to make comparisons easier.
+ # Force hash byte format setting to be lowercase to make comparisons
+ # easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py
--- a/contrib/seeds/makeseeds.py
+++ b/contrib/seeds/makeseeds.py
@@ -45,7 +45,8 @@
if len(sline) < 11:
return None
- # The user agent is at the end of the line. It may contain space, so we concatenate.
+ # The user agent is at the end of the line. It may contain space, so we
+ # concatenate.
for i in range(12, len(sline)):
sline[11] += ' ' + sline[i]
@@ -71,7 +72,8 @@
if m.group(1) in ['::']:
return None
ipstr = m.group(1)
- # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
+ # XXX parse IPv6 into number, could use name_to_ipv6 from
+ # generate-seeds
sortkey = ipstr
port = int(m.group(2))
else:
diff --git a/contrib/testgen/gen_base58_test_vectors.py b/contrib/testgen/gen_base58_test_vectors.py
--- a/contrib/testgen/gen_base58_test_vectors.py
+++ b/contrib/testgen/gen_base58_test_vectors.py
@@ -72,7 +72,8 @@
yield (rv, hexrepr, metadata)
-def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
+def gen_invalid_vector(template, corrupt_prefix,
+ randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
diff --git a/share/qt/extract_strings_qt.py b/share/qt/extract_strings_qt.py
--- a/share/qt/extract_strings_qt.py
+++ b/share/qt/extract_strings_qt.py
@@ -57,7 +57,9 @@
# xgettext -n --keyword=_ $FILES
XGETTEXT = os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
- print('Cannot extract strings: xgettext utility is not installed or not configured.', file=sys.stderr)
+ print(
+ 'Cannot extract strings: xgettext utility is not installed or not configured.',
+ file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',
file=sys.stderr)
sys.exit(1)
diff --git a/test/functional/abc-finalize-block.py b/test/functional/abc-finalize-block.py
--- a/test/functional/abc-finalize-block.py
+++ b/test/functional/abc-finalize-block.py
@@ -204,7 +204,8 @@
self.log.info(
"Try to finalize a block on an already finalized chain...")
- # Finalizing a block of an already finalized chain should have no effect
+ # Finalizing a block of an already finalized chain should have no
+ # effect
block_218 = node.getblockheader(alt_node_tip)['previousblockhash']
node.finalizeblock(block_218)
assert_equal(node.getfinalizedblockhash(), alt_node_tip)
@@ -282,7 +283,8 @@
# Restart the new node, so the blocks have no header received time.
self.restart_node(2)
- # There should be no finalized block (getfinalizedblockhash returns an empty string)
+ # There should be no finalized block (getfinalizedblockhash returns an
+ # empty string)
assert_equal(delay_node.getfinalizedblockhash(), str())
# Generate 20 blocks with no delay. This should not trigger auto-finalization.
diff --git a/test/functional/abc-invalid-message.py b/test/functional/abc-invalid-message.py
--- a/test/functional/abc-invalid-message.py
+++ b/test/functional/abc-invalid-message.py
@@ -36,7 +36,8 @@
class BadVersionP2PInterface(P2PInterface):
- def peer_connect(self, *args, services=NODE_NETWORK, send_version=False, **kwargs):
+ def peer_connect(self, *args, services=NODE_NETWORK,
+ send_version=False, **kwargs):
create_conn = super().peer_connect(*args, send_version=send_version, **kwargs)
# Send version message with invalid checksum
diff --git a/test/functional/abc-mempool-accept-txn.py b/test/functional/abc-mempool-accept-txn.py
--- a/test/functional/abc-mempool-accept-txn.py
+++ b/test/functional/abc-mempool-accept-txn.py
@@ -97,13 +97,15 @@
tx.vin[0].scriptSig = CScript(
[self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
- def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+ def create_and_sign_transaction(
+ self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, n, value, script)
self.sign_tx(tx, spend_tx, n)
tx.rehash()
return tx
- def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE])):
+ def next_block(self, number, spend=None,
+ additional_coinbase_value=0, script=CScript([OP_TRUE])):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
diff --git a/test/functional/abc-mempool-coherence-on-activations.py b/test/functional/abc-mempool-coherence-on-activations.py
--- a/test/functional/abc-mempool-coherence-on-activations.py
+++ b/test/functional/abc-mempool-coherence-on-activations.py
@@ -103,11 +103,13 @@
def create_fund_and_pre_fork_only_tx(spend):
- return create_fund_and_activation_specific_spending_tx(spend, pre_fork_only=True)
+ return create_fund_and_activation_specific_spending_tx(
+ spend, pre_fork_only=True)
def create_fund_and_post_fork_only_tx(spend):
- return create_fund_and_activation_specific_spending_tx(spend, pre_fork_only=False)
+ return create_fund_and_activation_specific_spending_tx(
+ spend, pre_fork_only=False)
# ---Mempool coherence on activations test---
diff --git a/test/functional/abc-p2p-compactblocks.py b/test/functional/abc-p2p-compactblocks.py
--- a/test/functional/abc-p2p-compactblocks.py
+++ b/test/functional/abc-p2p-compactblocks.py
@@ -115,7 +115,8 @@
tx = create_tx_with_script(spend_tx, n, b"", value, script)
return tx
- def next_block(self, number, spend=None, script=CScript([OP_TRUE]), block_size=0, extra_txns=0):
+ def next_block(self, number, spend=None, script=CScript(
+ [OP_TRUE]), block_size=0, extra_txns=0):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
@@ -155,14 +156,16 @@
tx = get_base_transaction()
# Make it the same format as transaction added for padding and save the size.
- # It's missing the padding output, so we add a constant to account for it.
+ # It's missing the padding output, so we add a constant to account
+ # for it.
tx.rehash()
# If a specific script is required, add it.
if script is not None:
tx.vout.append(CTxOut(1, script))
- # Put some random data into the first transaction of the chain to randomize ids.
+ # Put some random data into the first transaction of the chain to
+ # randomize ids.
tx.vout.append(
CTxOut(0, CScript([random.randint(0, 256), OP_RETURN])))
@@ -332,7 +335,8 @@
# In order to avoid having to resend a ton of transactions, we invalidate
# b2, which will send all its transactions in the mempool. Note that this
- # assumes reorgs will insert low-fee transactions back into the mempool.
+ # assumes reorgs will insert low-fee transactions back into the
+ # mempool.
node.invalidateblock(node.getbestblockhash())
# Let's send a compact block and see if the node accepts it.
diff --git a/test/functional/abc-p2p-fullblocktest-sigops.py b/test/functional/abc-p2p-fullblocktest-sigops.py
--- a/test/functional/abc-p2p-fullblocktest-sigops.py
+++ b/test/functional/abc-p2p-fullblocktest-sigops.py
@@ -89,7 +89,8 @@
tx = create_tx_with_script(spend.tx, spend.n, b"", value, script)
return tx
- def next_block(self, number, spend=None, script=CScript([OP_TRUE]), block_size=0, extra_sigops=0):
+ def next_block(self, number, spend=None, script=CScript(
+ [OP_TRUE]), block_size=0, extra_sigops=0):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
@@ -128,7 +129,8 @@
tx = get_base_transaction()
# Make it the same format as transaction added for padding and save the size.
- # It's missing the padding output, so we add a constant to account for it.
+ # It's missing the padding output, so we add a constant to account
+ # for it.
tx.rehash()
base_tx_size = len(tx.serialize()) + 18
@@ -136,7 +138,8 @@
if script is not None:
tx.vout.append(CTxOut(1, script))
- # Put some random data into the first transaction of the chain to randomize ids.
+ # Put some random data into the first transaction of the chain to
+ # randomize ids.
tx.vout.append(
CTxOut(0, CScript([random.randint(0, 256), OP_RETURN])))
@@ -161,7 +164,8 @@
if script_length > 510000:
if script_length < 1000000:
# Make sure we don't find ourselves in a position where we
- # need to generate a transaction smaller than what we expected.
+ # need to generate a transaction smaller than what we
+ # expected.
script_length = script_length // 2
else:
script_length = 500000
diff --git a/test/functional/abc-p2p-fullblocktest.py b/test/functional/abc-p2p-fullblocktest.py
--- a/test/functional/abc-p2p-fullblocktest.py
+++ b/test/functional/abc-p2p-fullblocktest.py
@@ -76,7 +76,8 @@
tx = create_tx_with_script(spend.tx, spend.n, b"", value, script)
return tx
- def next_block(self, number, spend=None, script=CScript([OP_TRUE]), block_size=0):
+ def next_block(self, number, spend=None,
+ script=CScript([OP_TRUE]), block_size=0):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
@@ -115,7 +116,8 @@
tx = get_base_transaction()
# Make it the same format as transaction added for padding and save the size.
- # It's missing the padding output, so we add a constant to account for it.
+ # It's missing the padding output, so we add a constant to account
+ # for it.
tx.rehash()
base_tx_size = len(tx.serialize()) + 18
@@ -123,7 +125,8 @@
if script is not None:
tx.vout.append(CTxOut(1, script))
- # Put some random data into the first transaction of the chain to randomize ids.
+ # Put some random data into the first transaction of the chain to
+ # randomize ids.
tx.vout.append(
CTxOut(0, CScript([random.randint(0, 256), OP_RETURN])))
@@ -148,7 +151,8 @@
if script_length > 510000:
if script_length < 1000000:
# Make sure we don't find ourselves in a position where we
- # need to generate a transaction smaller than what we expected.
+ # need to generate a transaction smaller than what we
+ # expected.
script_length = script_length // 2
else:
script_length = 500000
diff --git a/test/functional/abc-schnorr.py b/test/functional/abc-schnorr.py
--- a/test/functional/abc-schnorr.py
+++ b/test/functional/abc-schnorr.py
@@ -43,10 +43,12 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error
-# A mandatory (bannable) error occurs when people pass Schnorr signatures into OP_CHECKMULTISIG.
+# A mandatory (bannable) error occurs when people pass Schnorr signatures
+# into OP_CHECKMULTISIG.
SCHNORR_MULTISIG_ERROR = 'mandatory-script-verify-flag-failed (Signature cannot be 65 bytes in CHECKMULTISIG)'
-# A mandatory (bannable) error occurs when people send invalid Schnorr sigs into OP_CHECKSIG.
+# A mandatory (bannable) error occurs when people send invalid Schnorr
+# sigs into OP_CHECKSIG.
NULLFAIL_ERROR = 'mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)'
# Blocks with invalid scripts give this error:
diff --git a/test/functional/abc-sigops-mempool-mining.py b/test/functional/abc-sigops-mempool-mining.py
--- a/test/functional/abc-sigops-mempool-mining.py
+++ b/test/functional/abc-sigops-mempool-mining.py
@@ -241,7 +241,8 @@
self.log.info(
"Broadcasting regular transactions will push out the high-sigops txns.")
- # We can broadcast a bunch of regular txes. They need to pay a bit more fee (1.5 sat/vbyte) than the floor.
+ # We can broadcast a bunch of regular txes. They need to pay a bit more
+ # fee (1.5 sat/vbyte) than the floor.
for i in range(15):
spendfrom = self.spendable_outputs.popleft()
ctx = create_var_transaction(spendfrom, b'', 100000, 150000)
@@ -264,7 +265,8 @@
self.log.info(
'Reset the mempool fee floor (currently, restarting the node achieves this).')
- # could also be done by setting mocktime in future (the floor decays over a period of hours)
+ # could also be done by setting mocktime in future (the floor decays
+ # over a period of hours)
self.restart_node(0, self.extra_args[0])
(node,) = self.nodes
node.add_p2p_connection(P2PDataStore())
diff --git a/test/functional/abc-transaction-ordering.py b/test/functional/abc-transaction-ordering.py
--- a/test/functional/abc-transaction-ordering.py
+++ b/test/functional/abc-transaction-ordering.py
@@ -95,13 +95,15 @@
tx = get_base_transaction()
# Make it the same format as transaction added for padding and save the size.
- # It's missing the padding output, so we add a constant to account for it.
+ # It's missing the padding output, so we add a constant to account
+ # for it.
tx.rehash()
# Add the transaction to the block
self.add_transactions_to_block(block, [tx])
- # If we have a transaction count requirement, just fill the block until we get there
+ # If we have a transaction count requirement, just fill the block
+ # until we get there
while len(block.vtx) < tx_count:
# Create the new transaction and add it.
tx = get_base_transaction()
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -37,7 +37,8 @@
print("Only one out of --color or --html should be specified")
sys.exit(1)
- # There should only be one unknown argument - the path of the temporary test directory
+ # There should only be one unknown argument - the path of the temporary
+ # test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
@@ -76,14 +77,16 @@
# skip blank lines
if line == '\n':
continue
- # if this line has a timestamp, it's the start of a new log event.
+ # if this line has a timestamp, it's the start of a new log
+ # event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
- # if it doesn't have a timestamp, it's a continuation line of the previous log.
+ # if it doesn't have a timestamp, it's a continuation line of
+ # the previous log.
else:
event += "\n" + line
# Flush the final event
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -65,7 +65,8 @@
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
- # self.log.info("running custom_function") # Oops! Can't run self.log outside the BitcoinTestFramework
+ # self.log.info("running custom_function") # Oops! Can't run self.log
+ # outside the BitcoinTestFramework
pass
@@ -84,7 +85,8 @@
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
- # self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
+ # self.log.info("I've finished set_test_params") # Oops! Can't run
+ # self.log before run_test()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -135,7 +137,8 @@
def run_test(self):
"""Main test logic"""
- # Create P2P connections will wait for a verack to make sure the connection is fully up
+ # Create P2P connections will wait for a verack to make sure the
+ # connection is fully up
self.nodes[0].add_p2p_connection(BaseNode())
# Generating a block on one of the nodes will get us out of IBD
@@ -174,7 +177,8 @@
self.tip, create_coinbase(height), self.block_time)
block.solve()
block_message = msg_block(block)
- # Send message is used to send a P2P message to the node over our P2PInterface
+ # Send message is used to send a P2P message to the node over our
+ # P2PInterface
self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
@@ -209,7 +213,8 @@
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
- # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
+ # and synchronization issues. Note wait_until() acquires this global
+ # lock when testing the predicate.
with mininode_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -137,7 +137,8 @@
self.block_time += 1
height += 1
- # Create a transaction spending the coinbase output with an invalid (null) signature
+ # Create a transaction spending the coinbase output with an invalid
+ # (null) signature
tx = CTransaction()
tx.vin.append(
CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
@@ -170,7 +171,8 @@
self.nodes[0].disconnect_p2ps()
- # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
+ # Start node1 and node2 with assumevalid so they accept a block with a
+ # bad signature.
self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])
@@ -192,7 +194,8 @@
# Send all blocks to node1. All blocks will be accepted.
for i in range(2202):
p2p1.send_message(msg_block(self.blocks[i]))
- # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
+ # Syncing 2200 blocks can take a while on slow systems. Give it plenty
+ # of time to sync.
p2p1.sync_with_ping(960)
assert_equal(self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['height'], 2202)
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -204,7 +204,8 @@
# can only timelock this input if it's not too old --
# otherwise use height
can_time_lock = True
- if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
+ if ((cur_time - orig_time)
+ >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time
@@ -296,7 +297,8 @@
assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
node.sendrawtransaction, ToHex(tx))
else:
- # sendrawtransaction should succeed if the tx is not in the mempool
+ # sendrawtransaction should succeed if the tx is not in the
+ # mempool
node.sendrawtransaction(ToHex(tx))
return tx
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -123,7 +123,8 @@
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
- # Nothing should happen at this point. We saw b2 first so it takes priority.
+ # Nothing should happen at this point. We saw b2 first so it takes
+ # priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
@@ -343,7 +344,8 @@
# \-> b38 (11/37)
#
- # save 37's spendable output, but then double-spend out11 to invalidate the block
+ # save 37's spendable output, but then double-spend out11 to invalidate
+ # the block
self.log.info(
"Reject a block spending transaction from a block which failed to connect")
self.move_tip(35)
@@ -354,7 +356,8 @@
self.sync_blocks([b37], success=False,
reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
- # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
+ # attempt to spend b37's first non-coinbase tx, at which point b37 was
+ # still considered valid
self.move_tip(35)
b38 = self.next_block(38, spend=txout_b37)
self.sync_blocks([b38], success=False,
@@ -387,7 +390,8 @@
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
- # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
+ # the first transaction be non-coinbase, etc. The purpose of b44 is to
+ # make sure this works.
self.log.info("Build block 44 manually")
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
@@ -819,7 +823,8 @@
# \-> b71 (21)
#
# b72 is a good block.
- # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
+ # b71 is a copy of 72, but re-adds one of its transactions. However,
+ # it has the same hash as b72.
self.log.info(
"Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
self.move_tip(69)
@@ -1035,10 +1040,12 @@
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
- return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)
+ return create_tx_with_script(
+ spend_tx, n, amount=value, script_pub_key=script)
# sign a transaction, using the key we know about
- # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
+ # this signs input 0 in tx, which is assumed to be spending output n in
+ # spend_tx
def sign_tx(self, tx, spend_tx):
scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
@@ -1049,13 +1056,15 @@
tx.vin[0].scriptSig = CScript(
[self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
- def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
+ def create_and_sign_transaction(
+ self, spend_tx, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, 0, value, script)
self.sign_tx(tx, spend_tx)
tx.rehash()
return tx
- def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
+ def next_block(self, number, spend=None, additional_coinbase_value=0,
+ script=CScript([OP_TRUE]), solve=True):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
@@ -1140,7 +1149,8 @@
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p()
- def sync_blocks(self, blocks, success=True, reject_reason=None, request_block=True, reconnect=False, timeout=60):
+ def sync_blocks(self, blocks, success=True, reject_reason=None,
+ request_block=True, reconnect=False, timeout=60):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
diff --git a/test/functional/feature_block_sigops.py b/test/functional/feature_block_sigops.py
--- a/test/functional/feature_block_sigops.py
+++ b/test/functional/feature_block_sigops.py
@@ -117,7 +117,8 @@
# \-> b32 (9)
#
- # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
+ # MULTISIG: each op code counts as 20 sigops. To create the edge case,
+ # pack another 19 sigops at the end.
self.log.info(
"Accept a block with the max number of OP_CHECKMULTISIG sigops")
lots_of_multisigs = CScript(
@@ -315,7 +316,7 @@
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
- # bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
+ # bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
self.log.info(
"Reject a block containing too many sigops after a large script element")
self.move_tip(72)
@@ -346,7 +347,8 @@
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS_PER_MB+1 before the element
- # b75 succeeds because we put MAX_BLOCK_SIGOPS_PER_MB before the element
+ # b75 succeeds because we put MAX_BLOCK_SIGOPS_PER_MB before the
+ # element
self.log.info(
"Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
@@ -378,7 +380,8 @@
self.sync_blocks([b75], True)
self.save_spendable_output()
- # Check that if we push an element filled with CHECKSIGs, they are not counted
+ # Check that if we push an element filled with CHECKSIGs, they are not
+ # counted
self.move_tip(75)
b76 = self.next_block(76)
size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
@@ -399,10 +402,12 @@
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
- return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)
+ return create_tx_with_script(
+ spend_tx, n, amount=value, script_pub_key=script)
# sign a transaction, using the key we know about
- # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
+ # this signs input 0 in tx, which is assumed to be spending output n in
+ # spend_tx
def sign_tx(self, tx, spend_tx):
scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
@@ -413,13 +418,15 @@
tx.vin[0].scriptSig = CScript(
[self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
- def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
+ def create_and_sign_transaction(
+ self, spend_tx, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, 0, value, script)
self.sign_tx(tx, spend_tx)
tx.rehash()
return tx
- def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
+ def next_block(self, number, spend=None, additional_coinbase_value=0,
+ script=CScript([OP_TRUE]), solve=True):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
@@ -504,7 +511,8 @@
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p()
- def sync_blocks(self, blocks, success=True, reject_reason=None, request_block=True, reconnect=False, timeout=60):
+ def sync_blocks(self, blocks, success=True, reject_reason=None,
+ request_block=True, reconnect=False, timeout=60):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -144,7 +144,8 @@
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49.98)
- # The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is valid.
+ # The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is
+ # valid.
self.nodes[0].p2p.send_and_ping(msg_tx(fundtx))
assert fundtx.hash in self.nodes[0].getrawmempool()
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -131,7 +131,8 @@
def send_generic_input_tx(node, coinbases, address):
amount = Decimal("49.99")
- return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount))))
+ return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(
+ node, node.getblock(coinbases.pop())['tx'][0], address, amount))))
def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0):
@@ -151,7 +152,8 @@
return txs
-def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0):
+def create_bip112txs(node, bip112inputs, varyOP_CSV,
+ txversion, address, locktime_delta=0):
"""Returns a list of bip112 transactions with different bits set."""
txs = []
assert len(bip112inputs) >= 16
@@ -234,7 +236,8 @@
# Enough to build up to 1000 blocks 10 minutes apart without worrying
# about getting into the future
long_past_time = int(time.time()) - 600 * 1000
- # Enough so that the generated blocks will still all be before long_past_time
+ # Enough so that the generated blocks will still all be before
+ # long_past_time
self.nodes[0].setmocktime(long_past_time - 100)
# 82 blocks generated for inputs
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1)
@@ -268,7 +271,8 @@
bip68inputs.append(send_generic_input_tx(
self.nodes[0], self.coinbase_blocks, self.nodeaddress))
- # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
+ # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be
+ # prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
@@ -277,7 +281,8 @@
self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112basicinputs.append(inputs)
- # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
+ # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP
+ # (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
@@ -286,7 +291,8 @@
self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112diverseinputs.append(inputs)
- # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
+ # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to
+ # spending scriptSig)
bip112specialinput = send_generic_input_tx(
self.nodes[0], self.coinbase_blocks, self.nodeaddress)
@@ -342,12 +348,14 @@
self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1)
bip112txs_vary_nSequence_9_v2 = create_bip112txs(
self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1)
- # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
+ # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV
+ # OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = create_bip112txs(
self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress)
bip112txs_vary_OP_CSV_v2 = create_bip112txs(
self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress)
- # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
+ # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV
+ # OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(
self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1)
bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(
@@ -502,7 +510,8 @@
# Test #21
self.sync_blocks(test_blocks,)
- # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
+ # Height txs should fail and time txs should now pass 9 * 600 > 10 *
+ # 512
bip68success_txs.extend(bip68timetxs)
# Test #22
self.sync_blocks([self.create_test_block(bip68success_txs)])
@@ -573,7 +582,8 @@
[self.create_test_block_spend_utxos(self.nodes[0], success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
- # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
+ # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all
+ # remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch
# or failing the CSV check
@@ -630,9 +640,11 @@
self.sync_blocks([self.create_test_block(spend_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
- # Additional test, of checking that comparison of two time types works properly
+ # Additional test, of checking that comparison of two time types works
+ # properly
time_txs = []
- for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
+ for tx in [tx['tx']
+ for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
signtx = sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -239,19 +239,22 @@
# Sync these blocks with the other nodes
block_hashes_to_sync = []
- for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
+ for height in range(initial_height + 1,
+ self.nodes[3].getblockcount() + 1):
block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
self.log.debug("Syncing {} blocks with other nodes".format(
len(block_hashes_to_sync)))
- # Syncing the blocks could cause nodes to crash, so the test begins here.
+ # Syncing the blocks could cause nodes to crash, so the test begins
+ # here.
self.sync_node3blocks(block_hashes_to_sync)
starting_tip_height = self.nodes[3].getblockcount()
# Main test loop:
# each time through the loop, generate a bunch of transactions,
- # and then either mine a single new block on the tip, or some-sized reorg.
+ # and then either mine a single new block on the tip, or some-sized
+ # reorg.
for i in range(40):
self.log.info(
"Iteration {}, generating 2500 transactions {}".format(
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -41,7 +41,8 @@
block_count = 10
blocks = self.nodes[1].generate(block_count)
- # wait at most 10 seconds for expected file size before reading the content
+ # wait at most 10 seconds for expected file size before reading the
+ # content
wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(
self.block_filename).st_size >= (block_count * 65), timeout=10)
@@ -51,7 +52,8 @@
for l in f.read().splitlines()))
self.log.info("test -walletnotify")
- # wait at most 10 seconds for expected file size before reading the content
+ # wait at most 10 seconds for expected file size before reading the
+ # content
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(
self.tx_filename).st_size >= (block_count * 65), timeout=10)
@@ -89,8 +91,8 @@
self.nodes[0].invalidateblock(invalid_block)
# Give bitcoind 10 seconds to write the alert notification
- wait_until(lambda: os.path.isfile(self.alert_filename)
- and os.path.getsize(self.alert_filename), timeout=10)
+ wait_until(lambda: os.path.isfile(self.alert_filename) and
+ os.path.getsize(self.alert_filename), timeout=10)
self.log.info(self.alert_filename)
with open(self.alert_filename, 'r', encoding='utf8') as f:
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -80,7 +80,8 @@
def calc_usage(blockdir):
- return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
+ return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir)
+ if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
@@ -164,7 +165,8 @@
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
- # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
+ # Node 2 stays connected, so it hears about the stale blocks and
+ # then reorg's when node0 reconnects
disconnect_nodes(self.nodes[0], self.nodes[1])
disconnect_nodes(self.nodes[0], self.nodes[2])
# Mine 24 blocks in node 1
@@ -302,7 +304,8 @@
def height(index):
if use_timestamp:
- return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
+ return node.getblockheader(node.getblockhash(index))[
+ "time"] + TIMESTAMP_WINDOW
else:
return index
@@ -322,7 +325,8 @@
assert_equal(ret, expected_ret)
def has_block(index):
- return os.path.isfile(os.path.join(self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index)))
+ return os.path.isfile(os.path.join(
+ self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index)))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight
# (1000)
diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py
--- a/test/functional/interface_rest.py
+++ b/test/functional/interface_rest.py
@@ -49,7 +49,8 @@
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
- def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
+ def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON,
+ body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
@@ -166,7 +167,8 @@
# Do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
- # Get the spent output to later check for utxo (should be spent by then)
+ # Get the spent output to later check for utxo (should be spent by
+ # then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# Get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
@@ -263,7 +265,8 @@
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
- for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
+ for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot',
+ 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -155,13 +155,15 @@
self.log.info(
'A transaction with missing inputs, that existed once in the past')
tx = FromHex(CTransaction(), raw_tx_0)
- # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
+ # Set vout to 1, to spend the other outpoint (49 coins) of the
+ # in-chain-tx we want to double spend
tx.vin[0].prevout.n = 1
raw_tx_1 = node.signrawtransactionwithwallet(
ToHex(tx))['hex']
txid_1 = node.sendrawtransaction(
hexstring=raw_tx_1, allowhighfees=True)
- # Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
+ # Now spend both to "clearly hide" the outputs, ie. remove the coins
+ # from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
@@ -173,7 +175,8 @@
hexstring=raw_tx_spend_both, allowhighfees=True)
node.generate(1)
self.mempool_size = 0
- # Now see if we can add the coins back to the utxo set by sending the exact txs again
+ # Now see if we can add the coins back to the utxo set by sending the
+ # exact txs again
self.check_mempool_result(
result_expected=[
{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
@@ -210,8 +213,8 @@
self.log.info('A really large transaction')
tx = FromHex(CTransaction(), raw_tx_reference)
- tx.vin = [tx.vin[0]] * (1 + MAX_BLOCK_BASE_SIZE //
- len(tx.vin[0].serialize()))
+ tx.vin = [tx.vin[0]] * (1 + MAX_BLOCK_BASE_SIZE
+ // len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[
{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-oversize'}],
@@ -256,7 +259,8 @@
)
self.log.info('A coinbase transaction')
- # Pick the input of the first tx we signed, so it has to be a coinbase tx
+ # Pick the input of the first tx we signed, so it has to be a coinbase
+ # tx
raw_tx_coinbase_spent = node.getrawtransaction(
txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx = FromHex(CTransaction(), raw_tx_coinbase_spent)
@@ -331,7 +335,8 @@
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx = FromHex(CTransaction(), raw_tx_reference)
- # We could include it in the second block mined from now, but not the very next one
+ # We could include it in the second block mined from now, but not the
+ # very next one
tx.vin[0].nSequence = 2
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
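The 'really large transaction' hunk sizes the input list so the serialized transaction must exceed MAX_BLOCK_BASE_SIZE; note autopep8 also moved the // operator to the start of the continuation line. The arithmetic, with an assumed per-input size:

MAX_BLOCK_BASE_SIZE = 1000000  # framework constant; value assumed here
vin_size = 148                 # assumed serialized size of one input, bytes

copies = 1 + MAX_BLOCK_BASE_SIZE // vin_size
# duplicating one input this many times guarantees the tx serializes
# past the cap, so the mempool must reject it as bad-txns-oversize
assert copies * vin_size > MAX_BLOCK_BASE_SIZE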
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
--- a/test/functional/mempool_limit.py
+++ b/test/functional/mempool_limit.py
@@ -76,7 +76,8 @@
inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
- # specifically fund this tx with a fee < mempoolminfee, >= than minrelaytxfee
+ # specifically fund this tx with a fee < mempoolminfee but >=
+ # minrelaytxfee
txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
assert_raises_rpc_error(-26, "mempool min fee not met",
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -31,7 +31,8 @@
# Build a transaction that spends parent_txid:vout
# Return amount sent
- def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
+ def chain_transaction(self, node, parent_txid, vout,
+ value, fee, num_outputs):
send_value = satoshi_round((value - fee) / num_outputs)
inputs = [{'txid': parent_txid, 'vout': vout}]
outputs = {}
@@ -111,7 +112,8 @@
self.nodes[0].getmempooldescendants(x)))
# Check getmempooldescendants verbose output is correct
- for descendant, dinfo in self.nodes[0].getmempooldescendants(x, True).items():
+ for descendant, dinfo in self.nodes[0].getmempooldescendants(
+ x, True).items():
assert_equal(dinfo['depends'], [
chain[chain.index(descendant) - 1]])
if dinfo['descendantcount'] > 1:
@@ -127,7 +129,8 @@
self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors verbose output is correct
- for ancestor, ainfo in self.nodes[0].getmempoolancestors(x, True).items():
+ for ancestor, ainfo in self.nodes[0].getmempoolancestors(
+ x, True).items():
assert_equal(ainfo['spentby'], [
chain[chain.index(ancestor) + 1]])
if ainfo['ancestorcount'] > 1:
@@ -136,7 +139,8 @@
else:
assert_equal(ainfo['depends'], [])
- # Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
+ # Check that getmempoolancestors/getmempooldescendants correctly handle
+ # verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain) - 1)
for x in v_ancestors.keys():
@@ -193,7 +197,8 @@
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
- # Now check that the transaction is in the mempool, with the right modified fee
+ # Now check that the transaction is in the mempool, with the right
+ # modified fee
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
@@ -228,7 +233,8 @@
transaction_package.append(
{'txid': txid, 'vout': i, 'amount': sent_value})
- # Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
+ # Sign and send up to MAX_DESCENDANTS transactions chained off the
+ # parent tx
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(
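chain_transaction splits (value - fee) across num_outputs using satoshi_round, whose definition appears later in the util.py hunks. A self-contained sketch of that split:

from decimal import Decimal, ROUND_DOWN

def satoshi_round(amount):
    # same quantization as the framework helper: floor to 1e-8
    return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)

value, fee, num_outputs = Decimal('50'), Decimal('0.0001'), 10
send_value = satoshi_round((value - fee) / num_outputs)
assert send_value == Decimal('4.99999000')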
diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py
--- a/test/functional/mempool_persist.py
+++ b/test/functional/mempool_persist.py
@@ -79,7 +79,8 @@
"Verify that node2 calculates its balance correctly "
"after loading wallet transactions.")
self.stop_nodes()
- # Give this one a head-start, so we can be "extra-sure" that it didn't load anything later
+ # Give this one a head-start, so we can be "extra-sure" that it didn't
+ # load anything later
self.start_node(1)
self.start_node(0)
self.start_node(2)
@@ -87,7 +88,8 @@
wait_until(lambda: self.nodes[2].getmempoolinfo()["loaded"], timeout=1)
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[2].getrawmempool()), 5)
- # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
+ # The others have loaded their mempool. If node_1 loaded anything, we'd
+ # probably notice by now:
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify accounting of mempool transactions after restart is correct
@@ -130,7 +132,8 @@
self.log.debug(
"Prevent bitcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
# to test the exception we are creating a tmp folder called mempool.dat.new
- # which is an implementation detail that could change and break this test
+ # which is an implementation detail that could change and break this
+ # test
mempooldotnew1 = mempooldat1 + '.new'
os.mkdir(mempooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump mempool to disk",
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
--- a/test/functional/mempool_reorg.py
+++ b/test/functional/mempool_reorg.py
@@ -58,7 +58,8 @@
hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransactionwithwallet(timelock_tx)[
"hex"]
- # This will raise an exception because the timelock transaction is too immature to spend
+ # This will raise an exception because the timelock transaction is too
+ # immature to spend
assert_raises_rpc_error(-26, "bad-txns-nonfinal",
self.nodes[0].sendrawtransaction, timelock_tx)
@@ -94,7 +95,8 @@
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
- # spend_103_1 has been re-orged out of the chain and is back in the mempool
+ # spend_103_1 has been re-orged out of the chain and is back in the
+ # mempool
assert_equal(set(self.nodes[0].getrawmempool()), {
spend_101_id, spend_102_1_id, spend_103_1_id})
diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py
--- a/test/functional/mining_basic.py
+++ b/test/functional/mining_basic.py
@@ -178,7 +178,8 @@
block.solve()
def chain_tip(b_hash, *, status='headers-only', branchlen=1):
- return {'hash': b_hash, 'height': 202, 'branchlen': branchlen, 'status': status}
+ return {'hash': b_hash, 'height': 202,
+ 'branchlen': branchlen, 'status': status}
assert chain_tip(block.hash) not in node.getchaintips()
node.submitheader(hexdata=block.serialize().hex())
@@ -200,7 +201,8 @@
assert_equal(node.submitblock(
hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
assert chain_tip(bad_block_root.hash) in node.getchaintips()
- # We know the header for this invalid block, so should just return early without error:
+ # We know the header for this invalid block, so should just return
+ # early without error:
node.submitheader(hexdata=CBlockHeader(
bad_block_root).serialize().hex())
assert chain_tip(bad_block_root.hash) in node.getchaintips()
@@ -228,7 +230,8 @@
assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(
hexdata=CBlockHeader(bad_block_time).serialize().hex()))
- # Should ask for the block from a p2p node, if they announce the header as well:
+ # Should ask for the block from a p2p node, if they announce the header
+ # as well:
node.add_p2p_connection(P2PDataStore())
# Drop the first getheaders
node.p2p.wait_for_getheaders(timeout=5)
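chain_tip above builds the dict that getchaintips entries are compared against wholesale; a toy illustration of why plain membership tests work for this (height 202 is the value from the hunk):

def chain_tip(b_hash, *, status='headers-only', branchlen=1):
    return {'hash': b_hash, 'height': 202,
            'branchlen': branchlen, 'status': status}

tips = [chain_tip('00' * 32, status='active', branchlen=0)]
# `in` on a list of dicts compares full key/value sets, so a tip with a
# different status or branchlen is correctly reported as absent
assert chain_tip('00' * 32) not in tips
assert chain_tip('00' * 32, status='active', branchlen=0) in tips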
diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py
--- a/test/functional/mining_getblocktemplate_longpoll.py
+++ b/test/functional/mining_getblocktemplate_longpoll.py
@@ -77,7 +77,8 @@
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
- # min_relay_fee is fee per 1000 bytes, which should be more than enough.
+ # min_relay_fee is fee per 1000 bytes, which should be more than
+ # enough.
(txid, txhex, fee) = random_transaction(self.nodes,
Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -183,7 +183,8 @@
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
- def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
+ def test_sendcmpct(self, node, test_node,
+ preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
@@ -365,7 +366,8 @@
self.check_compactblock_construction_from_block(
header_and_shortids, block_hash, block)
- def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block):
+ def check_compactblock_construction_from_block(
+ self, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
@@ -789,7 +791,8 @@
msg.announce = True
peer.send_and_ping(msg)
- def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
+ def test_compactblock_reconstruction_multiple_peers(
+ self, node, stalling_peer, delivery_peer):
assert len(self.utxos)
def announce_cmpct_block(node, peer):
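received_sendcmpct above is the usual predicate-closure pattern consumed by the framework's wait_until (reflowed later in the util.py hunks). A simplified model of that helper and its use, for orientation only:

import time

def wait_until(predicate, *, timeout=60):
    # simplified model of the framework helper: poll until the
    # predicate holds, else fail at the deadline
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(0.05)
    raise AssertionError("predicate not met within {}s".format(timeout))

last_sendcmpct = [object()]  # stand-in for the peer's message log
wait_until(lambda: len(last_sendcmpct) > 0, timeout=1)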
diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py
--- a/test/functional/p2p_invalid_tx.py
+++ b/test/functional/p2p_invalid_tx.py
@@ -161,13 +161,15 @@
}
# Transactions that do not end up in the mempool
# tx_orphan_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
- # tx_orphan_invaid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
+ # tx_orphan_invaid, because it has negative fee (p2ps[1] is
+ # disconnected for relaying that tx)
# p2ps[1] is no longer connected
wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12)
assert_equal(expected_mempool, set(node.getrawmempool()))
- # restart node with sending BIP61 messages disabled, check that it disconnects without sending the reject message
+ # restart node with sending BIP61 messages disabled, check that it
+ # disconnects without sending the reject message
self.log.info(
'Test a transaction that is rejected, with BIP61 disabled')
self.restart_node(0, ['-enablebip61=0', '-persistmempool=0'])
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
--- a/test/functional/p2p_leak.py
+++ b/test/functional/p2p_leak.py
@@ -90,7 +90,8 @@
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
- # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
+ # NOTE: implementation-specific check here. Remove if bitcoind ban
+ # behavior changes
def on_open(self):
super().on_open()
for i in range(banscore):
diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py
--- a/test/functional/p2p_node_network_limited.py
+++ b/test/functional/p2p_node_network_limited.py
@@ -113,7 +113,8 @@
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
- # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
+ # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer,
+ # sync must not be possible
connect_nodes_bi(self.nodes[0], self.nodes[2])
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
@@ -135,10 +136,12 @@
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generate(10)
- # connect node1 (non pruned) with node0 (pruned) and check if the can sync
+ # connect node1 (non pruned) with node0 (pruned) and check if they can
+ # sync
connect_nodes_bi(self.nodes[0], self.nodes[1])
- # sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
+ # sync must be possible, node 1 is no longer in IBD and should
+ # therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -164,7 +164,8 @@
self.block_announced = True
for x in message.headers:
x.calc_sha256()
- # append because headers may be announced over multiple messages.
+ # append because headers may be announced over multiple
+ # messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
@@ -298,7 +299,8 @@
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
- # Try a few different responses; none should affect next announcement
+ # Try a few different responses; none should affect next
+ # announcement
if i == 0:
# first request the block
test_node.send_get_data([tip])
@@ -404,7 +406,8 @@
# getheaders or inv from peer.
for j in range(2):
self.log.debug("Part 3.{}: starting...".format(j))
- # First try mining a reorg that can propagate with header announcement
+ # First try mining a reorg that can propagate with header
+ # announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
@@ -412,7 +415,8 @@
block_time += 8
- # Mine a too-large reorg, which should be announced with a single inv
+ # Mine a too-large reorg, which should be announced with a single
+ # inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
@@ -438,7 +442,8 @@
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
- # Just get the data -- shouldn't cause headers announcements to resume
+ # Just get the data -- shouldn't cause headers
+ # announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
@@ -600,7 +605,8 @@
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
- # Send a header that doesn't connect, check that we get a getheaders.
+ # Send a header that doesn't connect, check that we get a
+ # getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
@@ -615,7 +621,8 @@
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
- # Send a header that doesn't connect, check that we get a getheaders.
+ # Send a header that doesn't connect, check that we get a
+ # getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
--- a/test/functional/p2p_unrequested_blocks.py
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -188,7 +188,8 @@
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
- # the last (height-too-high) on node (as long as it is not missing any headers)
+ # the last (height-too-high) on node (as long as it is not missing any
+ # headers)
tip = block_h3
all_blocks = []
for i in range(288):
@@ -198,7 +199,8 @@
all_blocks.append(next_block)
tip = next_block
- # Now send the block at height 5 and check that it wasn't accepted (missing header)
+ # Now send the block at height 5 and check that it wasn't accepted
+ # (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found",
@@ -206,7 +208,8 @@
assert_raises_rpc_error(-5, "Block not found",
self.nodes[0].getblockheader, all_blocks[1].hash)
- # The block at height 5 should be accepted if we provide the missing header, though
+ # The block at height 5 should be accepted if we provide the missing
+ # header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
@@ -219,7 +222,8 @@
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
- # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
+ # Blocks 1-287 should be accepted, block 288 should be ignored because
+ # it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(
@@ -290,7 +294,8 @@
block_291.sha256, create_coinbase(292), block_291.nTime + 1)
block_292.solve()
- # Now send all the headers on the chain and enough blocks to trigger reorg
+ # Now send all the headers on the chain and enough blocks to trigger
+ # reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
@@ -329,13 +334,15 @@
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
- # We should have failed reorg and switched back to 290 (but have block 291)
+ # We should have failed reorg and switched back to 290 (but have block
+ # 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(
block_291.hash)["confirmations"], -1)
- # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
+ # Now send a new header on the invalid chain, indicating we're forked
+ # off, and expect to get disconnected
block_293 = create_block(
block_292.sha256, create_coinbase(293), block_292.nTime + 1)
block_293.solve()
diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py
--- a/test/functional/rpc_bind.py
+++ b/test/functional/rpc_bind.py
@@ -69,7 +69,8 @@
self.stop_nodes()
def run_test(self):
- # due to OS-specific network stats queries, this test works only on Linux
+ # due to OS-specific network stats queries, this test works only on
+ # Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on Linux.")
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -103,7 +103,8 @@
'status': 'started',
'bit': 28,
'start_time': 0,
- # testdummy does not have a timeout so is set to the max int64 value
+ # testdummy does not have a timeout so is set to the max
+ # int64 value
'timeout': 0x7fffffffffffffff,
'since': 144,
'statistics': {
diff --git a/test/functional/rpc_estimatefee.py b/test/functional/rpc_estimatefee.py
--- a/test/functional/rpc_estimatefee.py
+++ b/test/functional/rpc_estimatefee.py
@@ -28,7 +28,8 @@
# estimatefee is 0.00001 by default, regardless of block contents
assert_equal(default_node.estimatefee(), Decimal('0.00001'))
- # estimatefee may be different for nodes that set it in their config
+ # estimatefee may be different for nodes that set it in their
+ # config
assert_equal(diff_relay_fee_node.estimatefee(), Decimal('0.001'))
# Check the reasonableness of settxfee
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -63,7 +63,8 @@
self.nodes[0].generate(121)
self.sync_all()
- # ensure that setting changePosition in fundraw with an exact match is handled properly
+ # ensure that setting changePosition in fundraw with an exact match is
+ # handled properly
rawmatch = self.nodes[2].createrawtransaction(
[], {self.nodes[2].getnewaddress(): 50})
rawmatch = self.nodes[2].fundrawtransaction(
diff --git a/test/functional/rpc_getblockstats.py b/test/functional/rpc_getblockstats.py
--- a/test/functional/rpc_getblockstats.py
+++ b/test/functional/rpc_getblockstats.py
@@ -60,7 +60,8 @@
self.setup_clean_chain = True
def get_stats(self):
- return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos + 1)]
+ return [self.nodes[0].getblockstats(
+ hash_or_height=self.start_height + i) for i in range(self.max_stat_pos + 1)]
def generate_test_data(self, filename):
mocktime = time.time()
@@ -108,7 +109,8 @@
self.expected_stats = d['stats']
self.log.info(self.expected_stats)
- # Set the timestamps from the file so that the nodes can get out of Initial Block Download
+ # Set the timestamps from the file so that the nodes can get out of
+ # Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.nodes[1].setmocktime(mocktime)
diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py
--- a/test/functional/rpc_net.py
+++ b/test/functional/rpc_net.py
@@ -107,7 +107,8 @@
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
- # the address bound to on one side will be the source address for the other node
+ # the address bound to on one side will be the source address for the
+ # other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -32,7 +32,8 @@
psbtx1 = self.nodes[0].walletcreatefundedpsbt(
[], {self.nodes[2].getnewaddress(): 10})['psbt']
- # Node 1 should not be able to add anything to it but still return the psbtx same as before
+ # Node 1 should not be able to add anything to it but still return the
+ # same psbtx as before
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -209,11 +209,13 @@
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
- # We should be able to get the raw transaction by providing the correct block
+ # We should be able to get the raw transaction by providing the correct
+ # block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
- # We should not have the 'in_active_chain' flag when we don't provide a block
+ # We should not have the 'in_active_chain' flag when we don't provide a
+ # block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
@@ -253,7 +255,8 @@
# createmultisig can only take public keys
self.nodes[0].createmultisig(
2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
- # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
+ # addmultisigaddress can take both pubkeys and addresses so long as
+ # they are in the wallet, which is tested here.
assert_raises_rpc_error(-5, "Invalid public key",
self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
@@ -487,14 +490,16 @@
# TRANSACTION VERSION NUMBER TESTS #
####################################
- # Test the minimum transaction version number that fits in a signed 32-bit integer.
+ # Test the minimum transaction version number that fits in a signed
+ # 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
- # Test the maximum transaction version number that fits in a signed 32-bit integer.
+ # Test the maximum transaction version number that fits in a signed
+ # 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
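The two version-number hunks probe the exact bounds of a signed 32-bit integer; a quick check of the arithmetic and of the little-endian encoding the test framework's serializer uses for nVersion:

import struct

INT32_MIN, INT32_MAX = -0x80000000, 0x7fffffff
assert INT32_MIN == -2**31 and INT32_MAX == 2**31 - 1

# nVersion is serialized as "<i" (little-endian signed 32-bit)
assert struct.pack("<i", INT32_MIN) == b"\x00\x00\x00\x80"
assert struct.pack("<i", INT32_MAX) == b"\xff\xff\xff\x7f"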
diff --git a/test/functional/rpc_txoutproof.py b/test/functional/rpc_txoutproof.py
--- a/test/functional/rpc_txoutproof.py
+++ b/test/functional/rpc_txoutproof.py
@@ -50,7 +50,8 @@
[node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid2 = self.nodes[0].sendrawtransaction(
self.nodes[0].signrawtransactionwithwallet(tx2)["hex"])
- # This will raise an exception because the transaction is not yet in a block
+ # This will raise an exception because the transaction is not yet in a
+ # block
assert_raises_rpc_error(-5, "Transaction not yet in block",
self.nodes[0].gettxoutproof, [txid1])
@@ -93,7 +94,8 @@
# We can get the proof if the transaction is unspent
assert_equal(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
- # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
+ # We can get the proof if we provide a list of transactions and one of
+ # them is unspent. The ordering of the list should not matter.
assert_equal(sorted(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist))
assert_equal(sorted(self.nodes[2].verifytxoutproof(
diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py
--- a/test/functional/test_framework/authproxy.py
+++ b/test/functional/test_framework/authproxy.py
@@ -69,7 +69,8 @@
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
- def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
+ def __init__(self, service_url, service_name=None,
+ timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
@@ -98,7 +99,8 @@
raise AttributeError
if self._service_name is not None:
name = "{}.{}".format(self._service_name, name)
- return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
+ return AuthServiceProxy(
+ self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
@@ -191,4 +193,5 @@
return response
def __truediv__(self, relative_uri):
- return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
+ return AuthServiceProxy("{}/{}".format(self.__service_url,
+ relative_uri), self._service_name, connection=self.__conn)
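The authproxy hunks rewrap two pieces of the same trick: __getattr__ composes dotted service names and __truediv__ composes URLs, each returning a fresh proxy on the shared connection. A stripped-down sketch (a toy class, not the real one):

class Proxy:
    def __init__(self, url, name=None):
        self.url = url
        self.name = name

    def __getattr__(self, name):
        # compose "outer.inner" method names, as in the hunk above
        if self.name is not None:
            name = "{}.{}".format(self.name, name)
        return Proxy(self.url, name)

    def __truediv__(self, relative_uri):
        # proxy / "wallet/w1" yields a proxy rooted at the sub-URL
        return Proxy("{}/{}".format(self.url, relative_uri), self.name)

p = Proxy("http://127.0.0.1:8332") / "wallet/w1"
assert p.getbalance.name == "getbalance"
assert p.url == "http://127.0.0.1:8332/wallet/w1"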
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -93,7 +93,8 @@
return coinbase
-def create_tx_with_script(prevtx, n, script_sig=b"", amount=1, script_pub_key=CScript()):
+def create_tx_with_script(prevtx, n, script_sig=b"",
+ amount=1, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
@@ -145,7 +146,8 @@
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
- # scriptSig might be of type bytes, so convert to CScript for the moment
+ # scriptSig might be of type bytes, so convert to CScript for the
+ # moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py
--- a/test/functional/test_framework/key.py
+++ b/test/functional/test_framework/key.py
@@ -127,11 +127,13 @@
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
- return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
+ return ssl.d2i_ECPrivateKey(ctypes.byref(
+ self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
- return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
+ return ssl.o2i_ECPublicKey(ctypes.byref(
+ self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
@@ -154,7 +156,8 @@
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
- def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
+ def get_ecdh_key(self, other_pubkey,
+ kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
@@ -187,13 +190,15 @@
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
- while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
+ while len(
+ low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (
total_size + new_s_size - s_size).to_bytes(1, byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1, byteorder='big')
- return b'\x30' + new_total_size_byte + mb_sig.raw[2:5 + r_size] + new_s_size_byte + low_s_bytes
+ return b'\x30' + new_total_size_byte + \
+ mb_sig.raw[2:5 + r_size] + new_s_size_byte + low_s_bytes
def verify(self, hash, sig):
"""Verify a DER signature"""
@@ -240,4 +245,5 @@
return repr(self)
def __repr__(self):
- return '{}({})'.format(self.__class__.__name__, super(CPubKey, self).__repr__())
+ return '{}({})'.format(self.__class__.__name__,
+ super(CPubKey, self).__repr__())
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -34,7 +34,8 @@
# past bip-31 for ping/pong
MY_VERSION = 70014
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
-# from version 70001 onwards, fRelay should be appended to version messages (BIP37)
+# from version 70001 onwards, fRelay should be appended to version
+# messages (BIP37)
MY_RELAY = 1
MAX_INV_SZ = 50000
@@ -791,7 +792,8 @@
return r
def __repr__(self):
- return "CPartialMerkleTree(nTransactions={}, vHash={}, vBits={})".format(self.nTransactions, repr(self.vHash), repr(self.vBits))
+ return "CPartialMerkleTree(nTransactions={}, vHash={}, vBits={})".format(
+ self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock:
@@ -812,7 +814,8 @@
return r
def __repr__(self):
- return "CMerkleBlock(header={}, txn={})".format(repr(self.header), repr(self.txn))
+ return "CMerkleBlock(header={}, txn={})".format(
+ repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
@@ -1199,16 +1202,16 @@
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
- if (self.code != self.REJECT_MALFORMED and
- (self.message == b"block" or self.message == b"tx")):
+ if (self.code != self.REJECT_MALFORMED
+ and (self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
- if (self.code != self.REJECT_MALFORMED and
- (self.message == b"block" or self.message == b"tx")):
+ if (self.code != self.REJECT_MALFORMED
+ and (self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
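The msg_reject hunks show the same conditional on both the parse and serialize paths: only block and tx rejects carry a trailing 32-byte hash. A sketch of the serialize side, with stand-ins for the framework's ser_string/ser_uint256 helpers:

import struct

REJECT_MALFORMED = 0x01

def ser_string(s):    # stand-in: 1-byte length prefix (assumes len < 253)
    return struct.pack("<B", len(s)) + s

def ser_uint256(u):   # stand-in: 256-bit little-endian integer
    return u.to_bytes(32, 'little')

def serialize_reject(message, code, reason, data=0):
    r = ser_string(message)
    r += struct.pack("<B", code)
    r += ser_string(reason)
    if (code != REJECT_MALFORMED
            and (message == b"block" or message == b"tx")):
        r += ser_uint256(data)
    return r

assert len(serialize_reject(b"tx", 0x10, b"bad", data=1)) == 40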
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -103,7 +103,8 @@
def __init__(self):
# The underlying transport of the connection.
- # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
+ # Should only call methods on this from the NetworkThread, cf.
+ # call_soon_threadsafe
self._transport = None
@property
@@ -302,7 +303,8 @@
# The network services received from the peer
self.nServices = 0
- def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs):
+ def peer_connect(self, *args, services=NODE_NETWORK,
+ send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
@@ -489,7 +491,8 @@
def __init__(self):
super().__init__(name="NetworkThread")
- # There is only one event loop and no more than one thread must be created
+ # There is only one event loop, so no more than one thread may be
+ # created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
@@ -568,7 +571,8 @@
if response is not None:
self.send_message(response)
- def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True, reject_reason=None, expect_disconnect=False, timeout=60):
+ def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True,
+ reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
@@ -599,12 +603,13 @@
self.sync_with_ping()
if success:
- wait_until(lambda: node.getbestblockhash() ==
- blocks[-1].hash, timeout=timeout)
+ wait_until(lambda: node.getbestblockhash()
+ == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
- def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
+ def send_txs_and_test(self, txs, node, *, success=True,
+ expect_disconnect=False, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
--- a/test/functional/test_framework/script.py
+++ b/test/functional/test_framework/script.py
@@ -474,7 +474,8 @@
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
- return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
+ return super(CScript, cls).__new__(
+ cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
@@ -639,7 +640,8 @@
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
- return (HASH_ONE, "inIdx {} out of range ({})".format(inIdx, len(txTo.vin)))
+ return (HASH_ONE, "inIdx {} out of range ({})".format(
+ inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
@@ -657,7 +659,8 @@
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
- return (HASH_ONE, "outIdx {} out of range ({})".format(outIdx, len(txtmp.vout)))
+ return (HASH_ONE, "outIdx {} out of range ({})".format(
+ outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
@@ -697,13 +700,15 @@
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
- if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
+ if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f)
+ != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
- if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
+ if ((hashtype & 0x1f) != SIGHASH_SINGLE and (
+ hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
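The script.py hunks rewrap two predicates on the sighash flags: the low five bits select the base type and bit 0x80 is ANYONECANPAY. A small sketch of the commitment rules those conditions encode (flag values are the standard ones):

SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE = 1, 2, 3
SIGHASH_ANYONECANPAY = 0x80

def commits_to_sequences(hashtype):
    # sequence numbers are committed to unless ANYONECANPAY, SINGLE or
    # NONE is set, matching the first rewrapped condition above
    base = hashtype & 0x1f
    return (not (hashtype & SIGHASH_ANYONECANPAY)
            and base != SIGHASH_SINGLE and base != SIGHASH_NONE)

def commits_to_all_outputs(hashtype):
    base = hashtype & 0x1f
    return base != SIGHASH_SINGLE and base != SIGHASH_NONE

assert commits_to_sequences(SIGHASH_ALL)
assert not commits_to_sequences(SIGHASH_ALL | SIGHASH_ANYONECANPAY)
assert not commits_to_all_outputs(SIGHASH_SINGLE)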
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -272,9 +272,11 @@
"""Tests must override this method to define test logic"""
raise NotImplementedError
- # Public helper methods. These can be accessed by the subclass test scripts.
+ # Public helper methods. These can be accessed by the subclass test
+ # scripts.
- def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
+ def add_nodes(self, num_nodes, extra_args=None,
+ *, rpchost=None, binary=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
@@ -388,7 +390,8 @@
sync_blocks(group)
sync_mempools(group)
- # Private helper methods. These should not be accessed by the subclass test scripts.
+ # Private helper methods. These should not be accessed by the subclass
+ # test scripts.
def _start_logging(self):
# Add logger and logging handlers
@@ -406,7 +409,8 @@
ll = int(self.options.loglevel) if self.options.loglevel.isdigit(
) else self.options.loglevel.upper()
ch.setLevel(ll)
- # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
+ # Format logs the same as bitcoind's debug.log with microprecision (so
+ # log files can be concatenated and sorted)
formatter = logging.Formatter(
fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
@@ -505,7 +509,8 @@
self.mocktime = 0
def cache_path(n, *paths):
- return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
+ return os.path.join(get_datadir_path(
+ self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
# Remove empty wallets dir
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -56,7 +56,8 @@
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
- def __init__(self, i, datadir, *, host, rpc_port, p2p_port, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
+ def __init__(self, i, datadir, *, host, rpc_port, p2p_port, timewait, bitcoind,
+ bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
@@ -178,7 +179,8 @@
self.default_args = [def_arg for def_arg in self.default_args
if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')]
- def start(self, extra_args=None, stdout=None, stderr=None, *args, **kwargs):
+ def start(self, extra_args=None, stdout=None,
+ stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
@@ -198,7 +200,8 @@
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
- # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
+ # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are
+ # written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(
@@ -219,7 +222,8 @@
rpc = get_rpc_proxy(rpc_url(self.datadir, self.host, self.rpc_port),
self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
rpc.getblockcount()
- # If the call to getblockcount() succeeds then the RPC connection is up
+ # If the call to getblockcount() succeeds then the RPC
+ # connection is up
self.log.debug("RPC successfully started")
if self.use_cli:
return
@@ -309,11 +313,13 @@
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
- if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
+ if re.search(re.escape(expected_msg), log,
+ flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
- def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
+ def assert_start_raises_init_error(
+ self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
@@ -338,7 +344,8 @@
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
- if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
+ if re.search(expected_msg, stderr,
+ flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
diff --git a/test/functional/test_framework/txtools.py b/test/functional/test_framework/txtools.py
--- a/test/functional/test_framework/txtools.py
+++ b/test/functional/test_framework/txtools.py
@@ -44,7 +44,8 @@
if next_iteration_padding > 0 and next_iteration_padding < extra_bytes:
padding_len += next_iteration_padding
- # If we're at exactly, or below, extra_bytes we don't want a 1 extra byte padding
+ # If we're at exactly, or below, extra_bytes we don't want 1 extra
+ # byte of padding
if padding_len <= extra_bytes:
tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
else:
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -134,7 +134,8 @@
try:
fun(*args, **kwds)
except JSONRPCException as e:
- # JSONRPCException was thrown as expected. Check the code and message values are correct.
+ # JSONRPCException was thrown as expected. Check the code and message
+ # values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError(
"Unexpected JSONRPC error code {}".format(e.error["code"]))
@@ -169,7 +170,8 @@
"String {!r} contains invalid characters for a hash.".format(string))
-def assert_array_result(object_array, to_match, expected, should_not_find=False):
+def assert_array_result(object_array, to_match, expected,
+ should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
@@ -235,7 +237,8 @@
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
-def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
+def wait_until(predicate, *, attempts=float('inf'),
+ timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
@@ -308,11 +311,13 @@
def p2p_port(n):
assert n <= MAX_NODES
- return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
+ return PORT_MIN + n + \
+ (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
- return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
+ return PORT_MIN + PORT_RANGE + n + \
+ (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, host, port):
@@ -392,7 +397,8 @@
def disconnect_nodes(from_node, to_node):
- for peer_id in [peer['id'] for peer in from_node.getpeerinfo() if to_node.name in peer['subver']]:
+ for peer_id in [peer['id'] for peer in from_node.getpeerinfo(
+ ) if to_node.name in peer['subver']]:
try:
from_node.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
@@ -415,8 +421,8 @@
from_node.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
- wait_until(lambda: all(peer['version'] !=
- 0 for peer in from_node.getpeerinfo()))
+ wait_until(lambda: all(peer['version']
+ != 0 for peer in from_node.getpeerinfo()))
def connect_nodes_bi(a, b):
@@ -442,7 +448,8 @@
"".join("\n {!r}".format(b) for b in best_hash)))
-def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
+def sync_mempools(rpc_connections, *, wait=1,
+ timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
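The p2p_port/rpc_port hunks wrap the same formula: each test run offsets its ports by a value derived from the port seed, keeping parallel runs from colliding. A sketch with assumed values for the framework constants:

# constants assumed for illustration; the framework defines its own
PORT_MIN, PORT_RANGE, MAX_NODES = 11000, 5000, 8
port_seed = 4217  # stand-in for PortSeed.n

def p2p_port(n):
    assert n <= MAX_NODES
    return PORT_MIN + n + \
        (MAX_NODES * port_seed) % (PORT_RANGE - 1 - MAX_NODES)

def rpc_port(n):
    return PORT_MIN + PORT_RANGE + n + \
        (MAX_NODES * port_seed) % (PORT_RANGE - 1 - MAX_NODES)

# the two ranges never overlap for the same node index
assert rpc_port(0) - p2p_port(0) == PORT_RANGE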
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -58,7 +58,8 @@
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
- # These are python files that live in the functional tests directory, but are not test scripts.
+ # These are python files that live in the functional tests directory, but
+ # are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
@@ -82,7 +83,8 @@
# Used to limit the number of tests, when list of tests is not provided on command line
# When --extended is specified, we run all tests, otherwise
-# we only run a test if its execution time in seconds does not exceed EXTENDED_CUTOFF
+# we only run a test if its execution time in seconds does not exceed
+# EXTENDED_CUTOFF
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
@@ -128,11 +130,13 @@
else:
status = "Failed"
- return TestResult(self.test_num, name, testdir, status, int(time.time() - time0), stdout, stderr)
+ return TestResult(self.test_num, name, testdir, status,
+ int(time.time() - time0), stdout, stderr)
def on_ci():
- return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') is not None
+ return os.getenv('TRAVIS') == 'true' or os.getenv(
+ 'TEAMCITY_VERSION') is not None
def main():
@@ -304,7 +308,8 @@
tmpdir, args.jobs, args.testsuitename, args.coverage, passon_args, args.combinedlogslen, build_timings)
-def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name, enable_coverage=False, args=[], combined_logs_len=0, build_timings=None):
+def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name,
+ enable_coverage=False, args=[], combined_logs_len=0, build_timings=None):
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
@@ -495,7 +500,8 @@
return test_results
-def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len):
+def print_results(test_results, tests_dir, max_len_name,
+ runtime, combined_logs_len):
results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
@@ -519,7 +525,11 @@
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(
tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
- print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
+ print(
+ "\n".join(
+ deque(
+ combined_logs.splitlines(),
+ combined_logs_len)))
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
@@ -594,7 +604,8 @@
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
- # Some tests must also be run with additional parameters. Add them to the list.
+ # Some tests must also be run with additional parameters. Add them to the
+ # list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
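The print_results hunk leans on collections.deque's maxlen to keep only the last combined_logs_len lines of the combined log; a two-line illustration:

from collections import deque

log = "a\nb\nc\nd"
# a bounded deque discards from the left, keeping the final N lines
assert "\n".join(deque(log.splitlines(), 2)) == "c\nd"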
diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py
--- a/test/functional/wallet_abandonconflict.py
+++ b/test/functional/wallet_abandonconflict.py
@@ -66,7 +66,8 @@
assert balance - newbalance <= total_fees(txA, txB, txC)
balance = newbalance
- # Disconnect nodes so node0's transactions don't get into node1's mempool
+ # Disconnect nodes so node0's transactions don't get into node1's
+ # mempool
disconnect_nodes(self.nodes[0], self.nodes[1])
# Identify the 10btc outputs
@@ -111,7 +112,8 @@
outputs = {self.nodes[0].getnewaddress(): signed3_change}
signed3 = self.nodes[0].signrawtransactionwithwallet(
self.nodes[0].createrawtransaction(inputs, outputs))
- # note tx is never directly referenced, only abandoned as a child of the above
+ # note tx is never directly referenced, only abandoned as a child of
+ # the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
@@ -129,7 +131,8 @@
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Transactions which are not in the mempool should only reduce wallet balance.
- # Transaction inputs should still be spent, but the change not yet received.
+ # Transaction inputs should still be spent, but the change not yet
+ # received.
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - signed3_change)
# Unconfirmed received funds that are not in mempool also shouldn't show
@@ -171,8 +174,8 @@
# Send child tx again so it is no longer abandoned.
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
- assert_equal(newbalance, balance - Decimal("10") -
- Decimal("14.99998") + Decimal("24.9996"))
+ assert_equal(newbalance, balance - Decimal("10")
+ - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Reset to a higher relay fee so that we abandon a transaction
@@ -197,7 +200,8 @@
connect_nodes(self.nodes[0], self.nodes[1])
sync_blocks(self.nodes)
- # Verify that B and C's 10 BCH outputs are available for spending again because AB1 is now conflicted
+ # Verify that B and C's 10 BCH outputs are available for spending again
+ # because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -39,7 +39,8 @@
connect_nodes_bi(self.nodes[0], self.nodes[2])
self.sync_all([self.nodes[0:3]])
- def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
+ def check_fee_amount(self, curr_balance,
+ balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
@@ -304,7 +305,8 @@
# should not be changed because tx was not broadcasted
assert_equal(self.nodes[2].getbalance(), node_2_bal)
- # now broadcast from another node, mine a block, sync, and check the balance
+ # now broadcast from another node, mine a block, sync, and check the
+ # balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
@@ -330,7 +332,8 @@
sync_blocks(self.nodes[0:3])
node_2_bal += 2
- # tx should be added to balance because after restarting the nodes tx should be broadcasted
+ # tx should be added to balance because after restarting the nodes tx
+ # should be broadcasted
assert_equal(self.nodes[2].getbalance(), node_2_bal)
# send a tx with value in a string (PR#6380 +)
@@ -405,7 +408,8 @@
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].rpc.ensure_ascii = mode
- # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
+ # unicode check: Basic Multilingual Plane, Supplementary Plane
+ # respectively
for label in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getnewaddress()
self.nodes[0].setlabel(addr, label)
@@ -447,7 +451,8 @@
assert_equal(len(self.nodes[0].listsinceblock(
blocks[1])["transactions"]), 0)
- # ==Check that wallet prefers to use coins that don't exceed mempool limits =====
+ # ==Check that wallet prefers to use coins that don't exceed mempool
+ # limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(
@@ -466,7 +470,8 @@
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
- # So we should be able to generate exactly chainlimit txs for each original output
+ # So we should be able to generate exactly chainlimit txs for each
+ # original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for i in range(chainlimit * 2):
@@ -486,20 +491,23 @@
total_txs = len(self.nodes[0].listtransactions("*", 99999))
# Try with walletrejectlongchains
- # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
+ # Double chain limit but require combining inputs, so we pass
+ # SelectCoinsMinConf
self.stop_node(0)
self.start_node(0, extra_args=[
"-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)])
# wait for loadmempool
timeout = 10
- while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit * 2):
+ while (timeout > 0 and len(
+ self.nodes[0].getrawmempool()) < chainlimit * 2):
time.sleep(0.5)
timeout -= 0.5
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit * 2)
node0_balance = self.nodes[0].getbalance()
- # With walletrejectlongchains we will not create the tx and store it in our wallet.
+ # With walletrejectlongchains we will not create the tx and store it in
+ # our wallet.
assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain",
self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
@@ -507,7 +515,8 @@
assert_equal(total_txs, len(
self.nodes[0].listtransactions("*", 99999)))
- # Test getaddressinfo. Note that these addresses are taken from disablewallet.py
+ # Test getaddressinfo. Note that these addresses are taken from
+ # disablewallet.py
assert_raises_rpc_error(-5, "Invalid address",
self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy")
address_info = self.nodes[0].getaddressinfo(
diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py
--- a/test/functional/wallet_hd.py
+++ b/test/functional/wallet_hd.py
@@ -81,7 +81,8 @@
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
- # otherwise node1 would auto-recover all funds in flag the keypool keys as used
+ # otherwise node1 would auto-recover all funds and flag the keypool
+ # keys as used
shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
shutil.rmtree(os.path.join(
self.nodes[1].datadir, "regtest", "chainstate"))
@@ -132,7 +133,8 @@
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
- # send a tx and make sure its using the internal chain for the changeoutput
+ # send a tx and make sure it's using the internal chain for the
+ # changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(
self.nodes[1].gettransaction(txid)['hex'])['vout']
diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py
--- a/test/functional/wallet_importmulti.py
+++ b/test/functional/wallet_importmulti.py
@@ -449,7 +449,8 @@
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
- # Importing existing watch only address with new timestamp should replace saved timestamp.
+ # Importing existing watch only address with new timestamp should
+ # replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
@@ -465,7 +466,8 @@
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
- # restart nodes to check for proper serialization/deserialization of watch only address
+ # restart nodes to check for proper serialization/deserialization of
+ # watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].getaddressinfo(watchonly_address)
diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py
--- a/test/functional/wallet_keypool.py
+++ b/test/functional/wallet_keypool.py
@@ -40,7 +40,8 @@
assert_raises_rpc_error(
-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
- # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
+ # put six (plus 2) new keys in the keypool (100% external-, +100%
+ # internal-keys, 1 in min)
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(6)
nodes[0].walletlock()
diff --git a/test/functional/wallet_listreceivedby.py b/test/functional/wallet_listreceivedby.py
--- a/test/functional/wallet_listreceivedby.py
+++ b/test/functional/wallet_listreceivedby.py
@@ -46,7 +46,8 @@
{"address": addr},
{},
True)
- # Bury Tx under 10 block so it will be returned by listreceivedbyaddress
+ # Bury Tx under 10 blocks so it will be returned by
+ # listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
assert_array_result(self.nodes[1].listreceivedbyaddress(),
@@ -123,13 +124,15 @@
balance = self.nodes[1].getreceivedbyaddress(addr, 0)
assert_equal(balance, Decimal("0.1"))
- # Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
+ # Bury Tx under 10 blocks so it will be returned by the default
+ # getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.1"))
- # Trying to getreceivedby for an address the wallet doesn't own should return an error
+ # Trying to getreceivedby for an address the wallet doesn't own should
+ # return an error
assert_raises_rpc_error(-4, "Address not found in wallet",
self.nodes[0].getreceivedbyaddress, addr)
@@ -146,12 +149,14 @@
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
- # listreceivedbylabel should return received_by_label_json because of 0 confirmations
+ # listreceivedbylabel should return received_by_label_json because of 0
+ # confirmations
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
received_by_label_json)
- # getreceivedbyaddress should return same balance because of 0 confirmations
+ # getreceivedbylabel should return same balance because of 0
+ # confirmations
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label)
diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py
--- a/test/functional/wallet_listsinceblock.py
+++ b/test/functional/wallet_listsinceblock.py
@@ -187,7 +187,8 @@
assert self.nodes[0].gettransaction(
txid1)['txid'] == txid1, "gettransaction failed to find txid1"
- # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
+ # listsinceblock(lastblockhash) should now include txid1, as seen from
+ # nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -122,7 +122,8 @@
w5 = wallet("w5")
w5.generate(1)
- # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
+ # now if wallets/ exists again, but the rootdir is specified as the
+ # walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-wallet=w4', '-wallet=w5',
'-walletdir=' + data_dir()])
@@ -225,7 +226,8 @@
assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. Duplicate -wallet filename specified.',
self.nodes[0].loadwallet, wallet_names[0])
- # Fail to load duplicate wallets by different ways (directory and filepath)
+ # Fail to load duplicate wallets in different ways (directory and
+ # filepath)
assert_raises_rpc_error(-4, "Wallet file verification failed: Error loading wallet wallet.dat. Duplicate -wallet filename specified.",
self.nodes[0].loadwallet, 'wallet.dat')
@@ -234,7 +236,8 @@
self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another.
- # Test this twice to make sure that we don't re-introduce https://github.com/bitcoin/bitcoin/issues/14304
+ # Test this twice to make sure that we don't re-introduce
+ # https://github.com/bitcoin/bitcoin/issues/14304
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid",
self.nodes[0].loadwallet, 'w8_copy')
@@ -242,7 +245,8 @@
assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'",
self.nodes[0].loadwallet, 'w8_symlink')
- # Fail to load if a directory is specified that doesn't contain a wallet
+ # Fail to load if a directory is specified that doesn't contain a
+ # wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file",
self.nodes[0].loadwallet, 'empty_wallet_dir')
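All of the failure cases above go through assert_raises_rpc_error from test_framework.util; a minimal sketch of how such a helper can be written (the .error attribute layout follows the test framework's JSONRPCException and is an assumption here):

def assert_rpc_error(code, message, fun, *args, **kwargs):
    # Call fun and require that it fails with the given RPC error code
    # and a message containing the expected substring.
    try:
        fun(*args, **kwargs)
    except Exception as e:
        error = getattr(e, "error", {}) or {}
        assert error.get("code") == code, error
        assert message in error.get("message", ""), error
    else:
        raise AssertionError("no RPC error was raised")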
diff --git a/test/functional/wallet_resendwallettransactions.py b/test/functional/wallet_resendwallettransactions.py
--- a/test/functional/wallet_resendwallettransactions.py
+++ b/test/functional/wallet_resendwallettransactions.py
@@ -21,7 +21,8 @@
assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast",
self.nodes[0].resendwallettransactions)
- # Should return an empty array if there aren't unconfirmed wallet transactions.
+ # Should return an empty array if there aren't unconfirmed wallet
+ # transactions.
self.stop_node(0)
self.start_node(0, extra_args=[])
assert_equal(self.nodes[0].resendwallettransactions(), [])
diff --git a/test/functional/wallet_txn_doublespend.py b/test/functional/wallet_txn_doublespend.py
--- a/test/functional/wallet_txn_doublespend.py
+++ b/test/functional/wallet_txn_doublespend.py
@@ -134,7 +134,8 @@
fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
- # Node1's balance should be its initial balance (1250 for 25 block rewards) plus the doublespend:
+ # Node1's balance should be its initial balance (1250 for 25 block
+ # rewards) plus the doublespend:
assert_equal(self.nodes[1].getbalance(), 1250 + 1240)
diff --git a/test/functional/wallet_zapwallettxes.py b/test/functional/wallet_zapwallettxes.py
--- a/test/functional/wallet_zapwallettxes.py
+++ b/test/functional/wallet_zapwallettxes.py
@@ -50,7 +50,8 @@
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
- # Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
+ # Stop-start node0. Both confirmed and unconfirmed transactions remain
+ # in the wallet.
self.stop_node(0)
self.start_node(0)
@@ -58,7 +59,8 @@
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
- # transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
+ # transaction is zapped from the wallet, but is re-added when the
+ # mempool is reloaded.
self.stop_node(0)
self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
@@ -78,7 +80,8 @@
# tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
- # This will raise an exception because the unconfirmed transaction has been zapped
+ # This will raise an exception because the unconfirmed transaction has
+ # been zapped
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id',
self.nodes[0].gettransaction, txid2)
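The zap test above repeatedly stops and restarts node0 with new flags; the same pattern, factored into a helper for clarity (stop_node and start_node are the framework methods used in the hunks, test is a running BitcoinTestFramework instance):

def restart_with_flags(test, idx, extra_args):
    # Restart one node with fresh command-line flags, e.g.
    # restart_with_flags(self, 0, ["-persistmempool=1", "-zapwallettxes=2"])
    test.stop_node(idx)
    test.start_node(idx, extra_args=extra_args)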
diff --git a/test/lint/check-rpc-mappings.py b/test/lint/check-rpc-mappings.py
--- a/test/lint/check-rpc-mappings.py
+++ b/test/lint/check-rpc-mappings.py
@@ -50,7 +50,8 @@
for line in f:
line = line.rstrip()
if not in_rpcs:
- if re.match(r"static const ContextFreeRPCCommand .*\[\] =", line):
+ if re.match(
+ r"static const ContextFreeRPCCommand .*\[\] =", line):
in_rpcs = True
else:
if line.startswith('};'):
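The hunk above wraps the regex that starts the scan of a C++ RPC command table; the surrounding state machine, as a standalone sketch (process_command is a placeholder for the per-row parsing the real script does):

import re

def scan_rpc_table(lines, process_command):
    in_rpcs = False
    for line in lines:
        line = line.rstrip()
        if not in_rpcs:
            # The wrapped re.match above: the table header opens the scan.
            if re.match(r"static const ContextFreeRPCCommand .*\[\] =", line):
                in_rpcs = True
        elif line.startswith('};'):
            in_rpcs = False
        else:
            process_command(line)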
diff --git a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py
--- a/test/lint/lint-format-strings.py
+++ b/test/lint/lint-format-strings.py
@@ -64,7 +64,8 @@
lines = [re.sub("// .*", " ", line).strip()
for line in source_code.split("\n")
if not line.strip().startswith("#")]
- return re.findall(r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name), " " + " ".join(lines))
+ return re.findall(
+ r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name), " " + " ".join(lines))
def normalize(s):
@@ -230,7 +231,8 @@
n = 0
in_specifier = False
for i, char in enumerate(format_string):
- if format_string[i - 1:i + 1] == "%%" or format_string[i:i + 2] == "%%":
+ if format_string[i - 1:i +
+ 1] == "%%" or format_string[i:i + 2] == "%%":
pass
elif char == "%":
in_specifier = True
@@ -267,8 +269,10 @@
for f in args.file:
file_content = f.read()
- for (function_name, skip_arguments) in FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS:
- for function_call_str in parse_function_calls(function_name, file_content):
+ for (function_name,
+ skip_arguments) in FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS:
+ for function_call_str in parse_function_calls(
+ function_name, file_content):
parts = parse_function_call_and_arguments(
function_name, function_call_str)
relevant_function_call_str = unescape("".join(parts))[:512]
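The %% handling wrapped above distinguishes an escaped percent sign from the start of a format specifier; a simplified standalone version of that counting logic (the real script also tracks multi-character specifiers):

def count_format_specifiers(format_string):
    # Count "%" specifiers, treating a doubled "%%" as a literal percent.
    n = 0
    i = 0
    while i < len(format_string):
        if format_string[i:i + 2] == "%%":
            i += 2
        elif format_string[i] == "%":
            n += 1
            i += 1
        else:
            i += 1
    return n

assert count_format_specifiers("%d of %s") == 2
assert count_format_specifiers("100%% done") == 0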
diff --git a/test/lint/lint-python-format.py b/test/lint/lint-python-format.py
--- a/test/lint/lint-python-format.py
+++ b/test/lint/lint-python-format.py
@@ -7,7 +7,8 @@
# Lint python format : This program checks that the old python formatting method
# is not being used (formatting with "string %s" % content).
# The new "{}".format(content) or f"{} content" method should be used instead.
-# Usage of the % formatter is expected to be deprecated by python in the future.
+# Usage of the % formatter is expected to be deprecated by python in the
+# future.
import re
import sys
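The linter above rejects old-style % interpolation in favour of str.format and f-strings; a minimal illustration of the three forms (not code from this diff):

name = "world"
print("hello %s" % name)        # old style: flagged by the linter
print("hello {}".format(name))  # str.format(): accepted
print(f"hello {name}")          # f-string: accepted

This kind of rewrap can be reproduced with autopep8's fix_code API; a hedged sketch, where the flag choice follows the aggressive line-wrapping mode this revision applies and the options dict shape should be verified against your autopep8 version (the CLI equivalent is autopep8 --select=E,W --aggressive --in-place <file>):

import autopep8

source = ("x = some_function(argument_one, argument_two, "
          "argument_three, argument_four, argument_five)\n")
# aggressive=1 enables the riskier fixes, including the E501 line
# wrapping seen throughout this diff.
print(autopep8.fix_code(source, options={"select": ["E", "W"],
                                         "aggressive": 1}))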