diff --git a/.arclint b/.arclint index 80188b538..3ba0504f8 100644 --- a/.arclint +++ b/.arclint @@ -1,341 +1,341 @@ { "linters": { "generated": { "type": "generated" }, "clang-format": { "type": "clang-format", "version": ">=12.0", "bin": [ "clang-format-12", "clang-format" ], "include": "(^(src|chronik)/.*\\.(h|c|cpp|mm)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "autopep8": { "type": "autopep8", "version": ">=1.3.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", - "(^test/functional/(abc|chronik).*\\.py$)" + "(^test/functional/[a-emt].*\\.py$)" ], "flags": [ "--aggressive", "--ignore=W503,W504", "--max-line-length=88" ] }, "black": { "type": "black", "version": ">=23.0.0", "include": [ - "(^test/functional/(abc|chronik).*\\.py$)" + "(^test/functional/[a-emt].*\\.py$)" ], "flags": [ "--preview" ] }, "flake8": { "type": "flake8", "version": ">=5.0", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ "--ignore=A003,E203,E303,E305,E501,E704,W503,W504", "--require-plugins=flake8-comprehensions,flake8-builtins" ] }, "lint-format-strings": { "type": "lint-format-strings", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/test/fuzz/strprintf.cpp$)" ] }, "check-doc": { "type": "check-doc", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)" }, "lint-tests": { "type": "lint-tests", "include": "(^src/(seeder/|rpc/|wallet/)?test/.*\\.(cpp)$)" }, "phpcs": { "type": "phpcs", "include": "(\\.php$)", "exclude": [ "(^arcanist/__phutil_library_.+\\.php$)" ], "phpcs.standard": "arcanist/phpcs.xml" }, "lint-locale-dependence": { "type": "lint-locale-dependence", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/))", "(^src/bench/nanobench.h$)" ] }, "lint-cheader": { "type": "lint-cheader", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "spelling": { "type": "spelling", "exclude": [ "(^build-aux/m4/)", "(^depends/)", "(^doc/release-notes/)", "(^contrib/gitian-builder/)", "(^src/(qt/locale|secp256k1|univalue|leveldb)/)", "(^test/lint/dictionary/)", "(package-lock.json)" ], "spelling.dictionaries": [ "test/lint/dictionary/english.json" ] }, "lint-assert-with-side-effects": { "type": "lint-assert-with-side-effects", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-quotes": { "type": "lint-include-quotes", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-guard": { "type": "lint-include-guard", "include": "(^(src|chronik)/.*\\.h$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/tinyformat.h$)" ] }, "lint-include-source": { "type": "lint-include-source", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-std-chrono": { "type": "lint-std-chrono", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-stdint": { "type": "lint-stdint", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", 
"(^src/compat/assumptions.h$)" ] }, "lint-source-filename": { "type": "lint-source-filename", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-boost-dependencies": { "type": "lint-boost-dependencies", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-python-encoding": { "type": "lint-python-encoding", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-python-shebang": { "type": "lint-python-shebang", "include": "(\\.py$)", "exclude": [ "(__init__\\.py$)", "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-bash-shebang": { "type": "lint-bash-shebang", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)" ] }, "shellcheck": { "type": "shellcheck", "version": ">=0.7.0", "flags": [ "--external-sources", "--source-path=SCRIPTDIR" ], "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)" ] }, "lint-shell-locale": { "type": "lint-shell-locale", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)", "(^cmake/utils/log-and-print-on-failure.sh)" ] }, "lint-cpp-void-parameters": { "type": "lint-cpp-void-parameters", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/glibc_compat.cpp$)" ] }, "lint-logs": { "type": "lint-logs", "include": "(^(src|chronik)/.*\\.(h|cpp|rs)$)" }, "lint-qt": { "type": "lint-qt", "include": "(^src/qt/.*\\.(h|cpp)$)", "exclude": [ "(^src/qt/(locale|forms|res)/)" ] }, "lint-doxygen": { "type": "lint-doxygen", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-whitespace": { "type": "lint-whitespace", "include": "(\\.(ac|am|cmake|conf|in|include|json|m4|md|openrc|php|pl|rs|sh|txt|yml)$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "yamllint": { "type": "yamllint", "include": "(\\.(yml|yaml)$)", "exclude": "(^src/(secp256k1|univalue|leveldb)/)" }, "lint-check-nonfatal": { "type": "lint-check-nonfatal", "include": [ "(^src/rpc/.*\\.(h|c|cpp)$)", "(^src/wallet/rpc*.*\\.(h|c|cpp)$)" ], "exclude": "(^src/rpc/server.cpp)" }, "lint-markdown": { "type": "lint-markdown", "include": [ "(\\.md$)" ], "exclude": "(^contrib/gitian-builder/)" }, "lint-python-mypy": { "type": "lint-python-mypy", "version": ">=0.910", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", "(^contrib/macdeploy/)" ], "flags": [ "--ignore-missing-imports", "--install-types", "--non-interactive" ] }, "lint-python-mutable-default": { "type": "lint-python-mutable-default", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "prettier": { "type": "prettier", "version": ">=2.6.0", "include": [ "(^cashtab/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)", "(^web/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)" ], "exclude": "(^web/.*/translations/.*\\.json$)" }, "lint-python-isort": { "type": "lint-python-isort", "version": ">=5.6.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "rustfmt": { "type": "rustfmt", "version": ">=1.5.1", "include": "(\\.rs$)" }, "eslint": { "type": "eslint", "version": ">=8.0.0", "include": [ "(cashtab/.*\\.js$)", 
"(apps/alias-server/.*\\.js$)", "(modules/ecashaddrjs/.*\\.js$)", "(apps/ecash-herald/.*\\.js$)", "(modules/chronik-client/.*\\.(js|jsx|ts|tsx)$)" ] }, "lint-python-flynt": { "type": "lint-python-flynt", "version": ">=0.78", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] } } } diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py index 4d769d1c6..ee9beb758 100755 --- a/test/functional/combine_logs.py +++ b/test/functional/combine_logs.py @@ -1,220 +1,241 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2020 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Combine logs from multiple bitcoin nodes as well as the test_framework log. This streams the combined log output to stdout. Use combine_logs.py > outputfile to write to an outputfile. If no argument is provided, the most recent test directory will be used.""" import argparse import heapq import itertools import os import pathlib import re import sys import tempfile from collections import defaultdict, namedtuple # N.B.: don't import any local modules here - this script must remain executable # without the parent module installed. # Should match same symbol in `test_framework.test_framework`. TMPDIR_PREFIX = "bitcoin_func_test_" # Matches on the date format at the start of the log event -TIMESTAMP_PATTERN = re.compile( - r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z") +TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z") -LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event']) +LogEvent = namedtuple("LogEvent", ["timestamp", "source", "event"]) def main(): """Main function. Parses args, reads the log files and renders them as text or html.""" parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + description=__doc__, formatter_class=argparse.RawTextHelpFormatter + ) parser.add_argument( - 'testdir', nargs='?', default='', - help='temporary test directory to combine logs from. ' - 'Defaults to the most recent') - parser.add_argument('-c', '--color', dest='color', action='store_true', - help='outputs the combined log with events colored by ' - 'source (requires posix terminal colors. Use less' - ' -r for viewing)') - parser.add_argument('--html', dest='html', action='store_true', - help='outputs the combined log as html. ' - 'Requires jinja2. pip install jinja2') + "testdir", + nargs="?", + default="", + help=( + "temporary test directory to combine logs from. Defaults to the most recent" + ), + ) + parser.add_argument( + "-c", + "--color", + dest="color", + action="store_true", + help=( + "outputs the combined log with events colored by " + "source (requires posix terminal colors. Use less" + " -r for viewing)" + ), + ) + parser.add_argument( + "--html", + dest="html", + action="store_true", + help="outputs the combined log as html. Requires jinja2. 
pip install jinja2", + ) args = parser.parse_args() if args.html and args.color: print("Only one out of --color or --html should be specified") sys.exit(1) testdir = args.testdir or find_latest_test_dir() if not testdir: print("No test directories found") sys.exit(1) if not args.testdir: - print(f"Opening latest test directory: {testdir}", - file=sys.stderr) + print(f"Opening latest test directory: {testdir}", file=sys.stderr) - colors = defaultdict(lambda: '') + colors = defaultdict(lambda: "") if args.color: colors["test"] = "\033[0;36m" # CYAN colors["node0"] = "\033[0;34m" # BLUE colors["node1"] = "\033[0;32m" # GREEN colors["node2"] = "\033[0;31m" # RED colors["node3"] = "\033[0;33m" # YELLOW colors["reset"] = "\033[0m" # Reset font color log_events = read_logs(testdir) if args.html: print_logs_html(log_events) else: print_logs_plain(log_events, colors) print_node_warnings(testdir, colors) def read_logs(tmp_dir): """Reads log files. Delegates to generator function get_log_events() to provide individual log events for each of the input log files.""" # Find out what the folder is called that holds the debug.log file - glob = pathlib.Path(tmp_dir).glob('node0/**/debug.log') + glob = pathlib.Path(tmp_dir).glob("node0/**/debug.log") path = next(glob, None) if path: # more than one debug.log should never happen assert next(glob, None) is None # extract the chain name - chain = re.search(r'node0/(.+?)/debug\.log$', - path.as_posix()).group(1) + chain = re.search(r"node0/(.+?)/debug\.log$", path.as_posix()).group(1) else: # fallback to regtest (should only happen when none exists) - chain = 'regtest' + chain = "regtest" files = [("test", f"{tmp_dir}/test_framework.log")] for i in itertools.count(): logfile = f"{tmp_dir}/node{i}/{chain}/debug.log" if not os.path.isfile(logfile): break files.append((f"node{i}", logfile)) return heapq.merge(*[get_log_events(source, f) for source, f in files]) def print_node_warnings(tmp_dir, colors): """Print nodes' errors and warnings""" warnings = [] - for stream in ['stdout', 'stderr']: + for stream in ["stdout", "stderr"]: for i in itertools.count(): folder = f"{tmp_dir}/node{i}/{stream}" if not os.path.isdir(folder): break - for (_, _, fns) in os.walk(folder): + for _, _, fns in os.walk(folder): for fn in fns: - warning = pathlib.Path( - f'{folder}/{fn}').read_text().strip() + warning = pathlib.Path(f"{folder}/{fn}").read_text().strip() if warning: - warnings.append((f"node{i} {stream}", - warning)) + warnings.append((f"node{i} {stream}", warning)) print() for w in warnings: print(f"{colors[w[0].split()[0]]} {w[0]} {w[1]} {colors['reset']}") def find_latest_test_dir(): """Returns the latest tmpfile test directory prefix.""" tmpdir = tempfile.gettempdir() def join_tmp(basename): return os.path.join(tmpdir, basename) def is_valid_test_tmpdir(basename): fullpath = join_tmp(basename) return ( os.path.isdir(fullpath) and basename.startswith(TMPDIR_PREFIX) and os.access(fullpath, os.R_OK) ) - testdir_paths = [join_tmp(name) for name in os.listdir(tmpdir) - if is_valid_test_tmpdir(name)] + testdir_paths = [ + join_tmp(name) for name in os.listdir(tmpdir) if is_valid_test_tmpdir(name) + ] return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None def get_log_events(source, logfile): """Generator function that returns individual log events. Log events may be split over multiple lines. 
We use the timestamp regex match as the marker for a new log event.""" try: - with open(logfile, 'r', encoding='utf-8') as infile: - event = '' - timestamp = '' + with open(logfile, "r", encoding="utf-8") as infile: + event = "" + timestamp = "" for line in infile: # skip blank lines - if line == '\n': + if line == "\n": continue # if this line has a timestamp, it's the start of a new log # event. time_match = TIMESTAMP_PATTERN.match(line) if time_match: if event: - yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip()) + yield LogEvent( + timestamp=timestamp, source=source, event=event.rstrip() + ) timestamp = time_match.group() if time_match.group(1) is None: # timestamp does not have microseconds. Add zeroes. timestamp_micro = timestamp.replace("Z", ".000000Z") line = line.replace(timestamp, timestamp_micro) timestamp = timestamp_micro event = line # if it doesn't have a timestamp, it's a continuation line of # the previous log. else: # Add the line. Prefix with space equivalent to the source # + timestamp so log lines are aligned event += f" {line}" # Flush the final event yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip()) except FileNotFoundError: - print(f"File {logfile} could not be opened. Continuing without it.", - file=sys.stderr) + print( + f"File {logfile} could not be opened. Continuing without it.", + file=sys.stderr, + ) def print_logs_plain(log_events, colors): """Renders the iterator of log events into text.""" for event in log_events: lines = event.event.splitlines() - print(f"{colors[event.source.rstrip()]} {event.source: <5} " - f"{lines[0]} {colors['reset']}") + print( + f"{colors[event.source.rstrip()]} {event.source: <5} " + f"{lines[0]} {colors['reset']}" + ) if len(lines) > 1: for line in lines[1:]: print(f"{colors[event.source.rstrip()]}{line}{colors['reset']}") def print_logs_html(log_events): """Renders the iterator of log events into html.""" try: import jinja2 except ImportError: print("jinja2 not found. Try `pip install jinja2`") sys.exit(1) - print(jinja2.Environment(loader=jinja2.FileSystemLoader('./')) - .get_template('combined_log_template.html') - .render(title="Combined Logs from testcase", - log_events=[event._asdict() for event in log_events])) + print( + jinja2.Environment(loader=jinja2.FileSystemLoader("./")) + .get_template("combined_log_template.html") + .render( + title="Combined Logs from testcase", + log_events=[event._asdict() for event in log_events], + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/test/functional/create_cache.py b/test/functional/create_cache.py index 0fdd5c607..052ea57cf 100755 --- a/test/functional/create_cache.py +++ b/test/functional/create_cache.py @@ -1,29 +1,29 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Creating a cache of the blockchain speeds up test execution when running multiple functional tests. This helper script is executed by test_runner when multiple tests are being run in parallel. 
""" from test_framework.test_framework import BitcoinTestFramework class CreateCache(BitcoinTestFramework): # Test network and test nodes are not required: def set_test_params(self): self.num_nodes = 0 def setup_network(self): pass def run_test(self): pass -if __name__ == '__main__': +if __name__ == "__main__": CreateCache().main() diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 31f8aeb8c..8da14248d 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -1,226 +1,233 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """An example functional test The module-level docstring should include a high-level description of what the test is doing. It's the first thing people see when they open the file and should give the reader information about *what* the test is testing and *how* it's being tested """ # Imports should be in PEP8 ordering (std library first, then third party # libraries then local imports). from collections import defaultdict # Avoid wildcard * imports if possible from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import MSG_BLOCK, CInv, msg_block, msg_getdata from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal # P2PInterface is a class containing callbacks to be executed when a P2P # message is received from the node-under-test. Subclass P2PInterface and # override the on_*() methods if you need custom behaviour. class BaseNode(P2PInterface): def __init__(self): """Initialize the P2PInterface Used to initialize custom properties for the Node that aren't included by default in the base class. Be aware that the P2PInterface base class already stores a counter for each P2P message type and the last received message of each type, which should be sufficient for the needs of most tests. Call super().__init__() first for standard initialization and then initialize custom properties.""" super().__init__() # Stores a dictionary of all blocks received self.block_receive_map = defaultdict(int) def on_block(self, message): """Override the standard on_block callback Store the hash of a received block in the dictionary.""" message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 def on_inv(self, message): """Override the standard on_inv callback""" pass def custom_function(): """Do some custom behaviour If this function is more generally useful for other tests, consider moving it to a module in test_framework.""" # self.log.info("running custom_function") # Oops! Can't run self.log # outside the BitcoinTestFramework pass class ExampleTest(BitcoinTestFramework): # Each functional test is a subclass of the BitcoinTestFramework class. # Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network() # and setup_nodes() methods to customize the test setup as required. def set_test_params(self): """Override test parameters for your individual test. This method must be overridden and num_nodes must be explicitly set.""" # By default every test loads a pre-mined chain of 200 blocks from cache. # Set setup_clean_chain to True to skip this and start from the Genesis # block. 
self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes self.extra_args = [[], ["-logips"], []] # self.log.info("I've finished set_test_params") # Oops! Can't run # self.log before run_test() # Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present. # This test uses generate which requires wallet to be compiled def skip_test_if_missing_module(self): self.skip_if_no_wallet() # Use add_options() to add specific command-line options for your test. # In practice this is not used very much, since the tests are mostly written # to be run in automated environments without command-line options. # def add_options() # pass # Use setup_chain() to customize the node data directories. In practice # this is not used very much since the default behaviour is almost always # fine # def setup_chain(): # pass def setup_network(self): """Setup the test network topology Often you won't need to override this, since the standard network topology (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests. If you do override this method, remember to start the nodes, assign them to self.nodes, connect them and then sync.""" self.setup_nodes() # In this test, we're not connecting node2 to node0 or node1. Calls to # sync_all() should not include node2, since we're not expecting it to # sync. self.connect_nodes(0, 1) self.sync_all(self.nodes[0:2]) # Use setup_nodes() to customize the node start behaviour (for example if # you don't want to start all nodes at the start of the test). # def setup_nodes(): # pass def custom_method(self): """Do some custom behaviour for this test Define it in a method here because you're going to use it repeatedly. If you think it's useful in general, consider moving it to the base BitcoinTestFramework class so other tests can use it.""" self.log.info("Running custom_method") def run_test(self): """Main test logic""" # Create P2P connections will wait for a verack to make sure the # connection is fully up peer_messaging = self.nodes[0].add_p2p_connection(BaseNode()) # Generating a block on one of the nodes will get us out of IBD - blocks = [int(self.generate(self.nodes[0], sync_fun=lambda: self.sync_all( - self.nodes[0:2]), nblocks=1)[0], 16)] + blocks = [ + int( + self.generate( + self.nodes[0], + sync_fun=lambda: self.sync_all(self.nodes[0:2]), + nblocks=1, + )[0], + 16, + ) + ] # Notice above how we called an RPC by calling a method with the same # name on the node object. Notice also how we used a keyword argument # to specify a named RPC argument. Neither of those are defined on the # node object. Instead there's some __getattr__() magic going on under # the covers to dispatch unrecognised attribute calls to the RPC # interface. # Logs are nice. Do plenty of them. They can be used in place of comments for # breaking the test into sub-sections. self.log.info("Starting test!") self.log.info("Calling a custom function") custom_function() self.log.info("Calling a custom method") self.custom_method() self.log.info("Create some blocks") self.tip = int(self.nodes[0].getbestblockhash(), 16) - self.block_time = self.nodes[0].getblock( - self.nodes[0].getbestblockhash())['time'] + 1 + self.block_time = ( + self.nodes[0].getblock(self.nodes[0].getbestblockhash())["time"] + 1 + ) height = self.nodes[0].getblockcount() for _ in range(10): # Use the blocktools functionality to manually build a block. 
# Calling the generate() rpc is easier, but this allows us to exactly # control the blocks and transactions. - block = create_block( - self.tip, create_coinbase( - height + 1), self.block_time) + block = create_block(self.tip, create_coinbase(height + 1), self.block_time) block.solve() block_message = msg_block(block) # Send message is used to send a P2P message to the node over our # P2PInterface peer_messaging.send_message(block_message) self.tip = block.sha256 blocks.append(self.tip) self.block_time += 1 height += 1 - self.log.info( - "Wait for node1 to reach current tip (height 11) using RPC") + self.log.info("Wait for node1 to reach current tip (height 11) using RPC") self.nodes[1].waitforblockheight(11) self.log.info("Connect node2 and node1") self.connect_nodes(1, 2) self.log.info("Wait for node2 to receive all the blocks from node1") self.sync_all() self.log.info("Add P2P connection to node2") self.nodes[0].disconnect_p2ps() peer_receiving = self.nodes[2].add_p2p_connection(BaseNode()) self.log.info("Test that node2 propagates all the blocks to us") getdata_request = msg_getdata() for block in blocks: getdata_request.inv.append(CInv(MSG_BLOCK, block)) peer_receiving.send_message(getdata_request) # wait_until() will loop until a predicate condition is met. Use it to test properties of the # P2PInterface objects. peer_receiving.wait_until( lambda: sorted(blocks) == sorted(peer_receiving.block_receive_map.keys()), - timeout=5) + timeout=5, + ) self.log.info("Check that each block was received only once") # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking # and synchronization issues. Note p2p.wait_until() acquires this # global lock internally when testing the predicate. with p2p_lock: for block in peer_receiving.block_receive_map.values(): assert_equal(block, 1) -if __name__ == '__main__': +if __name__ == "__main__": ExampleTest().main() diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index ad4722215..c38480d73 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -1,400 +1,534 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test mempool acceptance of raw transactions.""" from decimal import Decimal from test_framework.key import ECKey from test_framework.messages import ( MAX_BLOCK_BASE_SIZE, MAX_MONEY, XEC, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex, ) from test_framework.script import ( OP_0, OP_2, OP_3, OP_CHECKMULTISIG, OP_EQUAL, OP_HASH160, OP_RETURN, CScript, hash160, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class MempoolAcceptanceTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.extra_args = [[ - '-txindex', - '-acceptnonstdtxn=0', # Try to mimic main-net - '-permitbaremultisig=0', - ]] * self.num_nodes + self.extra_args = [ + [ + "-txindex", + "-acceptnonstdtxn=0", # Try to mimic main-net + "-permitbaremultisig=0", + ] + ] * self.num_nodes self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def check_mempool_result(self, result_expected, *args, **kwargs): """Wrapper to check result of testmempoolaccept on node_0's mempool""" result_test = self.nodes[0].testmempoolaccept(*args, **kwargs) assert_equal(result_expected, result_test) # Must not change mempool state - assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) + assert_equal(self.nodes[0].getmempoolinfo()["size"], self.mempool_size) def run_test(self): node = self.nodes[0] - self.log.info('Start with empty mempool, and 200 blocks') + self.log.info("Start with empty mempool, and 200 blocks") self.mempool_size = 0 assert_equal(node.getblockcount(), 200) - assert_equal(node.getmempoolinfo()['size'], self.mempool_size) + assert_equal(node.getmempoolinfo()["size"], self.mempool_size) coins = node.listunspent() - self.log.info('Should not accept garbage to testmempoolaccept') - assert_raises_rpc_error(-3, 'Expected type array, got string', - lambda: node.testmempoolaccept(rawtxs='ff00baar')) - assert_raises_rpc_error(-8, 'Array must contain between 1 and 50 transactions.', - lambda: node.testmempoolaccept(rawtxs=['ff22'] * 51)) - assert_raises_rpc_error(-8, 'Array must contain between 1 and 50 transactions.', - lambda: node.testmempoolaccept(rawtxs=[])) - assert_raises_rpc_error(-22, 'TX decode failed', - lambda: node.testmempoolaccept(rawtxs=['ff00baar'])) + self.log.info("Should not accept garbage to testmempoolaccept") + assert_raises_rpc_error( + -3, + "Expected type array, got string", + lambda: node.testmempoolaccept(rawtxs="ff00baar"), + ) + assert_raises_rpc_error( + -8, + "Array must contain between 1 and 50 transactions.", + lambda: node.testmempoolaccept(rawtxs=["ff22"] * 51), + ) + assert_raises_rpc_error( + -8, + "Array must contain between 1 and 50 transactions.", + lambda: node.testmempoolaccept(rawtxs=[]), + ) + assert_raises_rpc_error( + -22, "TX decode failed", lambda: node.testmempoolaccept(rawtxs=["ff00baar"]) + ) - self.log.info('A transaction already in the blockchain') + self.log.info("A transaction already in the blockchain") # Pick a random coin(base) to spend coin = coins.pop() - raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction( - inputs=[{'txid': coin['txid'], 'vout': coin['vout']}], - outputs=[{node.getnewaddress(): 300000}, - {node.getnewaddress(): 49000000}], - ))['hex'] - txid_in_block = node.sendrawtransaction( - hexstring=raw_tx_in_block, maxfeerate=0) + raw_tx_in_block = node.signrawtransactionwithwallet( + node.createrawtransaction( + inputs=[{"txid": coin["txid"], "vout": coin["vout"]}], + 
outputs=[ + {node.getnewaddress(): 300000}, + {node.getnewaddress(): 49000000}, + ], + ) + )["hex"] + txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, maxfeerate=0) self.generate(node, 1) self.mempool_size = 0 self.check_mempool_result( - result_expected=[{'txid': txid_in_block, 'allowed': False, - 'reject-reason': 'txn-already-known'}], + result_expected=[ + { + "txid": txid_in_block, + "allowed": False, + "reject-reason": "txn-already-known", + } + ], rawtxs=[raw_tx_in_block], ) - self.log.info('A transaction not in the mempool') + self.log.info("A transaction not in the mempool") fee = Decimal("7.00") - raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction( - inputs=[{"txid": txid_in_block, "vout": 0, - "sequence": 0xfffffffd}], - outputs=[{node.getnewaddress(): Decimal(300_000) - fee}], - ))['hex'] + raw_tx_0 = node.signrawtransactionwithwallet( + node.createrawtransaction( + inputs=[{"txid": txid_in_block, "vout": 0, "sequence": 0xFFFFFFFD}], + outputs=[{node.getnewaddress(): Decimal(300_000) - fee}], + ) + )["hex"] tx = FromHex(CTransaction(), raw_tx_0) txid_0 = tx.rehash() self.check_mempool_result( - result_expected=[{'txid': txid_0, 'allowed': True, - 'size': tx.billable_size(), - 'fees': {'base': fee}}], + result_expected=[ + { + "txid": txid_0, + "allowed": True, + "size": tx.billable_size(), + "fees": {"base": fee}, + } + ], rawtxs=[raw_tx_0], ) - self.log.info('A final transaction not in the mempool') + self.log.info("A final transaction not in the mempool") # Pick a random coin(base) to spend coin = coins.pop() output_amount = Decimal(25_000) - raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction( - inputs=[{'txid': coin['txid'], 'vout': coin['vout'], - "sequence": 0xffffffff}], # SEQUENCE_FINAL - outputs=[{node.getnewaddress(): output_amount}], - locktime=node.getblockcount() + 2000, # Can be anything - ))['hex'] + raw_tx_final = node.signrawtransactionwithwallet( + node.createrawtransaction( + inputs=[ + {"txid": coin["txid"], "vout": coin["vout"], "sequence": 0xFFFFFFFF} + ], # SEQUENCE_FINAL + outputs=[{node.getnewaddress(): output_amount}], + locktime=node.getblockcount() + 2000, # Can be anything + ) + )["hex"] tx = FromHex(CTransaction(), raw_tx_final) - fee_expected = coin['amount'] - output_amount + fee_expected = coin["amount"] - output_amount self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), 'allowed': True, - 'size': tx.billable_size(), - 'fees': {'base': fee_expected}}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": True, + "size": tx.billable_size(), + "fees": {"base": fee_expected}, + } + ], rawtxs=[tx.serialize().hex()], maxfeerate=0, ) node.sendrawtransaction(hexstring=raw_tx_final, maxfeerate=0) self.mempool_size += 1 - self.log.info('A transaction in the mempool') + self.log.info("A transaction in the mempool") node.sendrawtransaction(hexstring=raw_tx_0) self.mempool_size += 1 self.check_mempool_result( - result_expected=[{'txid': txid_0, 'allowed': False, - 'reject-reason': 'txn-already-in-mempool'}], + result_expected=[ + { + "txid": txid_0, + "allowed": False, + "reject-reason": "txn-already-in-mempool", + } + ], rawtxs=[raw_tx_0], ) # Removed RBF test # self.log.info('A transaction that replaces a mempool transaction') # ... 
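[Note on the RPC exercised throughout this file: check_mempool_result() wraps testmempoolaccept, which dry-runs raw transactions against the current mempool without mutating it. A minimal sketch of the call shape, assuming a regtest `node` RPC handle; raw_tx_a and raw_tx_b are hypothetical placeholders, and the exact result keys match those asserted in this test:

    # Dry-run the transactions; the mempool itself is left untouched.
    results = node.testmempoolaccept(rawtxs=[raw_tx_a, raw_tx_b], maxfeerate=0)
    for res in results:
        # Each result carries "txid" and an "allowed" verdict; rejections
        # additionally carry a "reject-reason" such as "txn-mempool-conflict".
        if res["allowed"]:
            print(res["txid"], "accepted, size:", res["size"])
        else:
            print(res["txid"], "rejected:", res["reject-reason"])
]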
- self.log.info('A transaction that conflicts with an unconfirmed tx') + self.log.info("A transaction that conflicts with an unconfirmed tx") # Send the transaction that conflicts with the mempool transaction node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0) # take original raw_tx_0 tx = FromHex(CTransaction(), raw_tx_0) tx.vout[0].nValue -= int(4 * fee * XEC) # Set more fee # skip re-signing the tx self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), - 'allowed': False, - 'reject-reason': 'txn-mempool-conflict'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "txn-mempool-conflict", + } + ], rawtxs=[tx.serialize().hex()], maxfeerate=0, ) - self.log.info('A transaction with missing inputs, that never existed') + self.log.info("A transaction with missing inputs, that never existed") tx = FromHex(CTransaction(), raw_tx_0) - tx.vin[0].prevout = COutPoint(txid=int('ff' * 32, 16), n=14) + tx.vin[0].prevout = COutPoint(txid=int("ff" * 32, 16), n=14) # skip re-signing the tx self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}], + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "missing-inputs", + } + ], rawtxs=[ToHex(tx)], ) self.log.info( - 'A transaction with missing inputs, that existed once in the past') + "A transaction with missing inputs, that existed once in the past" + ) tx = FromHex(CTransaction(), raw_tx_0) # Set vout to 1, to spend the other outpoint (49 coins) of the # in-chain-tx we want to double spend tx.vin[0].prevout.n = 1 - raw_tx_1 = node.signrawtransactionwithwallet( - ToHex(tx))['hex'] + raw_tx_1 = node.signrawtransactionwithwallet(ToHex(tx))["hex"] txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, maxfeerate=0) # Now spend both to "clearly hide" the outputs, ie. 
remove the coins # from the utxo set by spending them - raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction( - inputs=[ - {'txid': txid_0, 'vout': 0}, - {'txid': txid_1, 'vout': 0}, - ], - outputs=[{node.getnewaddress(): 100000}] - ))['hex'] + raw_tx_spend_both = node.signrawtransactionwithwallet( + node.createrawtransaction( + inputs=[ + {"txid": txid_0, "vout": 0}, + {"txid": txid_1, "vout": 0}, + ], + outputs=[{node.getnewaddress(): 100000}], + ) + )["hex"] txid_spend_both = node.sendrawtransaction( - hexstring=raw_tx_spend_both, maxfeerate=0) + hexstring=raw_tx_spend_both, maxfeerate=0 + ) self.generate(node, 1) self.mempool_size = 0 # Now see if we can add the coins back to the utxo set by sending the # exact txs again self.check_mempool_result( result_expected=[ - {'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}], + {"txid": txid_0, "allowed": False, "reject-reason": "missing-inputs"} + ], rawtxs=[raw_tx_0], ) self.check_mempool_result( result_expected=[ - {'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}], + {"txid": txid_1, "allowed": False, "reject-reason": "missing-inputs"} + ], rawtxs=[raw_tx_1], ) self.log.info('Create a signed "reference" tx for later use') - raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction( - inputs=[{'txid': txid_spend_both, 'vout': 0}], - outputs=[{node.getnewaddress(): 50000}], - ))['hex'] + raw_tx_reference = node.signrawtransactionwithwallet( + node.createrawtransaction( + inputs=[{"txid": txid_spend_both, "vout": 0}], + outputs=[{node.getnewaddress(): 50000}], + ) + )["hex"] tx = FromHex(CTransaction(), raw_tx_reference) # Reference tx should be valid on itself self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), 'allowed': True, - 'size': tx.billable_size(), - 'fees': {'base': Decimal(100_000 - 50_000)}}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": True, + "size": tx.billable_size(), + "fees": {"base": Decimal(100_000 - 50_000)}, + } + ], rawtxs=[ToHex(tx)], maxfeerate=0, ) - self.log.info('A transaction with no outputs') + self.log.info("A transaction with no outputs") tx = FromHex(CTransaction(), raw_tx_reference) tx.vout = [] # Skip re-signing the transaction for context independent checks from now on # FromHex(tx, node.signrawtransactionwithwallet(ToHex(tx))['hex']) self.check_mempool_result( - result_expected=[{'txid': tx.rehash( - ), 'allowed': False, 'reject-reason': 'bad-txns-vout-empty'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-vout-empty", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('A really large transaction') + self.log.info("A really large transaction") tx = FromHex(CTransaction(), raw_tx_reference) - tx.vin = [tx.vin[0]] * (1 + MAX_BLOCK_BASE_SIZE - // len(tx.vin[0].serialize())) + tx.vin = [tx.vin[0]] * (1 + MAX_BLOCK_BASE_SIZE // len(tx.vin[0].serialize())) self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}], + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-oversize", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('A transaction with negative output value') + self.log.info("A transaction with negative output value") tx = FromHex(CTransaction(), raw_tx_reference) tx.vout[0].nValue *= -1 self.check_mempool_result( - result_expected=[{'txid': tx.rehash( - ), 'allowed': False, 'reject-reason': 'bad-txns-vout-negative'}], + result_expected=[ + { + 
"txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-vout-negative", + } + ], rawtxs=[ToHex(tx)], ) # The following two validations prevent overflow of the output amounts # (see CVE-2010-5139). - self.log.info('A transaction with too large output value') + self.log.info("A transaction with too large output value") tx = FromHex(CTransaction(), raw_tx_reference) tx.vout[0].nValue = MAX_MONEY + 1 self.check_mempool_result( - result_expected=[{'txid': tx.rehash( - ), 'allowed': False, 'reject-reason': 'bad-txns-vout-toolarge'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-vout-toolarge", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('A transaction with too large sum of output values') + self.log.info("A transaction with too large sum of output values") tx = FromHex(CTransaction(), raw_tx_reference) tx.vout = [tx.vout[0]] * 2 tx.vout[0].nValue = MAX_MONEY self.check_mempool_result( - result_expected=[{'txid': tx.rehash( - ), 'allowed': False, 'reject-reason': 'bad-txns-txouttotal-toolarge'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-txouttotal-toolarge", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('A transaction with duplicate inputs') + self.log.info("A transaction with duplicate inputs") tx = FromHex(CTransaction(), raw_tx_reference) tx.vin = [tx.vin[0]] * 2 self.check_mempool_result( - result_expected=[{'txid': tx.rehash( - ), 'allowed': False, 'reject-reason': 'bad-txns-inputs-duplicate'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-inputs-duplicate", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('A non-coinbase transaction with coinbase-like outpoint') + self.log.info("A non-coinbase transaction with coinbase-like outpoint") tx = FromHex(CTransaction(), raw_tx_reference) - tx.vin.append(CTxIn(COutPoint(txid=0, n=0xffffffff))) + tx.vin.append(CTxIn(COutPoint(txid=0, n=0xFFFFFFFF))) self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), - 'allowed': False, - 'reject-reason': 'bad-txns-prevout-null'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-prevout-null", + } + ], rawtxs=[tx.serialize().hex()], ) - self.log.info('A coinbase transaction') + self.log.info("A coinbase transaction") # Pick the input of the first tx we signed, so it has to be a coinbase # tx raw_tx_coinbase_spent = node.getrawtransaction( - txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid']) + txid=node.decoderawtransaction(hexstring=raw_tx_in_block)["vin"][0]["txid"] + ) tx = FromHex(CTransaction(), raw_tx_coinbase_spent) self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-tx-coinbase'}], + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-tx-coinbase", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('Some nonstandard transactions') + self.log.info("Some nonstandard transactions") tx = FromHex(CTransaction(), raw_tx_reference) tx.nVersion = 3 # A version currently non-standard self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'version'}], + {"txid": tx.rehash(), "allowed": False, "reject-reason": "version"} + ], rawtxs=[ToHex(tx)], ) tx = FromHex(CTransaction(), raw_tx_reference) tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': 
False, 'reject-reason': 'scriptpubkey'}], + {"txid": tx.rehash(), "allowed": False, "reject-reason": "scriptpubkey"} + ], rawtxs=[ToHex(tx)], ) tx = FromHex(CTransaction(), raw_tx_reference) key = ECKey() key.generate() pubkey = key.get_pubkey().get_bytes() # Some bare multisig script (2-of-3) tx.vout[0].scriptPubKey = CScript( - [OP_2, pubkey, pubkey, pubkey, OP_3, OP_CHECKMULTISIG]) + [OP_2, pubkey, pubkey, pubkey, OP_3, OP_CHECKMULTISIG] + ) self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), 'allowed': False, - 'reject-reason': 'bare-multisig'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bare-multisig", + } + ], rawtxs=[tx.serialize().hex()], ) tx = FromHex(CTransaction(), raw_tx_reference) # Some not-pushonly scriptSig tx.vin[0].scriptSig = CScript([OP_HASH160]) self.check_mempool_result( - result_expected=[{'txid': tx.rehash( - ), 'allowed': False, 'reject-reason': 'scriptsig-not-pushonly'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "scriptsig-not-pushonly", + } + ], rawtxs=[ToHex(tx)], ) tx = FromHex(CTransaction(), raw_tx_reference) # Some too large scriptSig (>1650 bytes) - tx.vin[0].scriptSig = CScript([b'a' * 1648]) + tx.vin[0].scriptSig = CScript([b"a" * 1648]) self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), 'allowed': False, - 'reject-reason': 'scriptsig-size'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "scriptsig-size", + } + ], rawtxs=[tx.serialize().hex()], ) tx = FromHex(CTransaction(), raw_tx_reference) - output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript( - [OP_HASH160, hash160(b'burn'), OP_EQUAL])) + output_p2sh_burn = CTxOut( + nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b"burn"), OP_EQUAL]) + ) # Use enough outputs to make the tx too large for our policy num_scripts = 100000 // len(output_p2sh_burn.serialize()) tx.vout = [output_p2sh_burn] * num_scripts self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'tx-size'}], + {"txid": tx.rehash(), "allowed": False, "reject-reason": "tx-size"} + ], rawtxs=[ToHex(tx)], ) tx = FromHex(CTransaction(), raw_tx_reference) tx.vout[0] = output_p2sh_burn # Make output smaller, such that it is dust for our policy tx.vout[0].nValue -= 1 self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'dust'}], + {"txid": tx.rehash(), "allowed": False, "reject-reason": "dust"} + ], rawtxs=[ToHex(tx)], ) tx = FromHex(CTransaction(), raw_tx_reference) - tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff']) + tx.vout[0].scriptPubKey = CScript([OP_RETURN, b"\xff"]) tx.vout = [tx.vout[0]] * 2 self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'multi-op-return'}], + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "multi-op-return", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('A timelocked transaction') + self.log.info("A timelocked transaction") tx = FromHex(CTransaction(), raw_tx_reference) # Should be non-max, so locktime is not ignored tx.vin[0].nSequence -= 1 tx.nLockTime = node.getblockcount() + 1 self.check_mempool_result( result_expected=[ - {'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-nonfinal'}], + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "bad-txns-nonfinal", + } + ], rawtxs=[ToHex(tx)], ) - self.log.info('A transaction that is locked by BIP68 
sequence logic') + self.log.info("A transaction that is locked by BIP68 sequence logic") tx = FromHex(CTransaction(), raw_tx_reference) # We could include it in the second block mined from now, but not the # very next one tx.vin[0].nSequence = 2 # Can skip re-signing the tx because of early rejection self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), - 'allowed': False, - 'reject-reason': 'non-BIP68-final'}], + result_expected=[ + { + "txid": tx.rehash(), + "allowed": False, + "reject-reason": "non-BIP68-final", + } + ], rawtxs=[tx.serialize().hex()], maxfeerate=0, ) -if __name__ == '__main__': +if __name__ == "__main__": MempoolAcceptanceTest().main() diff --git a/test/functional/mempool_expiry.py b/test/functional/mempool_expiry.py index a59a33501..47ce09864 100755 --- a/test/functional/mempool_expiry.py +++ b/test/functional/mempool_expiry.py @@ -1,125 +1,127 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests that a mempool transaction expires after a given timeout and that its children are removed as well. Both the default expiry timeout defined by DEFAULT_MEMPOOL_EXPIRY and a user definable expiry timeout via the '-mempoolexpiry=<n>' command line argument (<n> is the timeout in hours) are tested. """ from datetime import timedelta from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error from test_framework.wallet import MiniWallet DEFAULT_MEMPOOL_EXPIRY = 336 # hours CUSTOM_MEMPOOL_EXPIRY = 10 # hours class MempoolExpiryTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def test_transaction_expiry(self, timeout): """Tests that a transaction expires after the expiry timeout and its children are removed as well.""" node = self.nodes[0] self.wallet = MiniWallet(node) # Add enough mature utxos to the wallet so that all txs spend confirmed # coins. self.generate(self.wallet, 4) self.generate(node, 100) # Send a parent transaction that will expire. - parent_txid = self.wallet.send_self_transfer(from_node=node)['txid'] + parent_txid = self.wallet.send_self_transfer(from_node=node)["txid"] parent_utxo = self.wallet.get_utxo(txid=parent_txid) independent_utxo = self.wallet.get_utxo() # Ensure the transactions we send to trigger the mempool check spend # utxos that are independent of the transactions being tested for # expiration. trigger_utxo1 = self.wallet.get_utxo() trigger_utxo2 = self.wallet.get_utxo() # Set the mocktime to the arrival time of the parent transaction. - entry_time = node.getmempoolentry(parent_txid)['time'] + entry_time = node.getmempoolentry(parent_txid)["time"] node.setmocktime(entry_time) # Let half of the timeout elapse and broadcast the child transaction # spending the parent transaction.
half_expiry_time = entry_time + int(60 * 60 * timeout / 2) node.setmocktime(half_expiry_time) child_txid = self.wallet.send_self_transfer( - from_node=node, utxo_to_spend=parent_utxo)['txid'] - assert_equal( - parent_txid, - node.getmempoolentry(child_txid)['depends'][0]) + from_node=node, utxo_to_spend=parent_utxo + )["txid"] + assert_equal(parent_txid, node.getmempoolentry(child_txid)["depends"][0]) self.log.info( - 'Broadcast child transaction after ' - f'{timedelta(seconds=half_expiry_time - entry_time)} hours.') + "Broadcast child transaction after " + f"{timedelta(seconds=half_expiry_time - entry_time)} hours." + ) # Broadcast another (independent) transaction. independent_txid = self.wallet.send_self_transfer( - from_node=node, utxo_to_spend=independent_utxo)['txid'] + from_node=node, utxo_to_spend=independent_utxo + )["txid"] # Let most of the timeout elapse and check that the parent tx is still # in the mempool. nearly_expiry_time = entry_time + 60 * 60 * timeout - 5 node.setmocktime(nearly_expiry_time) # Broadcast a transaction as the expiry of transactions in the mempool # is only checked when a new transaction is added to the mempool. - self.wallet.send_self_transfer( - from_node=node, utxo_to_spend=trigger_utxo1) + self.wallet.send_self_transfer(from_node=node, utxo_to_spend=trigger_utxo1) self.log.info( - 'Test parent tx not expired after ' - f'{timedelta(seconds=nearly_expiry_time - entry_time)} hours.') - assert_equal(entry_time, node.getmempoolentry(parent_txid)['time']) + "Test parent tx not expired after " + f"{timedelta(seconds=nearly_expiry_time - entry_time)} hours." + ) + assert_equal(entry_time, node.getmempoolentry(parent_txid)["time"]) # Transaction should be evicted from the mempool after the expiry time # has passed. expiry_time = entry_time + 60 * 60 * timeout + 5 node.setmocktime(expiry_time) # Again, broadcast a transaction so the expiry of transactions in the # mempool is checked. - self.wallet.send_self_transfer( - from_node=node, utxo_to_spend=trigger_utxo2) + self.wallet.send_self_transfer(from_node=node, utxo_to_spend=trigger_utxo2) self.log.info( - 'Test parent tx expiry after ' - f'{timedelta(seconds=expiry_time - entry_time)} hours.') - assert_raises_rpc_error(-5, 'Transaction not in mempool', - node.getmempoolentry, parent_txid) + "Test parent tx expiry after " + f"{timedelta(seconds=expiry_time - entry_time)} hours." + ) + assert_raises_rpc_error( + -5, "Transaction not in mempool", node.getmempoolentry, parent_txid + ) # The child transaction should be removed from the mempool as well. - self.log.info('Test child tx is evicted as well.') - assert_raises_rpc_error(-5, 'Transaction not in mempool', - node.getmempoolentry, child_txid) + self.log.info("Test child tx is evicted as well.") + assert_raises_rpc_error( + -5, "Transaction not in mempool", node.getmempoolentry, child_txid + ) # Check that the independent tx is still in the mempool. self.log.info( - f'Test the independent tx not expired after ' - f'{timedelta(seconds=expiry_time - half_expiry_time)} hours.') - assert_equal( - half_expiry_time, - node.getmempoolentry(independent_txid)['time']) + "Test the independent tx not expired after " + f"{timedelta(seconds=expiry_time - half_expiry_time)} hours." + ) + assert_equal(half_expiry_time, node.getmempoolentry(independent_txid)["time"]) def run_test(self): self.log.info( - 'Test default mempool expiry timeout of ' - f'{DEFAULT_MEMPOOL_EXPIRY} hours.') + f"Test default mempool expiry timeout of {DEFAULT_MEMPOOL_EXPIRY} hours." 
+ ) self.test_transaction_expiry(DEFAULT_MEMPOOL_EXPIRY) self.log.info( - f'Test custom mempool expiry timeout of {CUSTOM_MEMPOOL_EXPIRY} hours.') - self.restart_node( - 0, [f'-mempoolexpiry={CUSTOM_MEMPOOL_EXPIRY}']) + f"Test custom mempool expiry timeout of {CUSTOM_MEMPOOL_EXPIRY} hours." + ) + self.restart_node(0, [f"-mempoolexpiry={CUSTOM_MEMPOOL_EXPIRY}"]) self.test_transaction_expiry(CUSTOM_MEMPOOL_EXPIRY) -if __name__ == '__main__': +if __name__ == "__main__": MempoolExpiryTest().main() diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index ce366b958..578b75f9a 100755 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -1,90 +1,94 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool limiting together/eviction with the wallet.""" from decimal import Decimal from test_framework.blocktools import create_confirmed_utxos, send_big_transactions from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, ) class MempoolLimitTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 - self.extra_args = [[ - "-acceptnonstdtxn=1", - "-maxmempool=5", - "-spendzeroconfchange=0", - ]] + self.extra_args = [ + [ + "-acceptnonstdtxn=1", + "-maxmempool=5", + "-spendzeroconfchange=0", + ] + ] self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): - relayfee = self.nodes[0].getnetworkinfo()['relayfee'] + relayfee = self.nodes[0].getnetworkinfo()["relayfee"] - self.log.info('Check that mempoolminfee is minrelytxfee') - assert_equal(self.nodes[0].getmempoolinfo()[ - 'minrelaytxfee'], Decimal('10.00')) - assert_equal(self.nodes[0].getmempoolinfo()[ - 'mempoolminfee'], Decimal('10.00')) + self.log.info("Check that mempoolminfee is minrelytxfee") + assert_equal(self.nodes[0].getmempoolinfo()["minrelaytxfee"], Decimal("10.00")) + assert_equal(self.nodes[0].getmempoolinfo()["mempoolminfee"], Decimal("10.00")) txids = [] utxo_groups = 4 - utxos = create_confirmed_utxos( - self, self.nodes[0], 1 + 30 * utxo_groups) + utxos = create_confirmed_utxos(self, self.nodes[0], 1 + 30 * utxo_groups) - self.log.info('Create a mempool tx that will be evicted') + self.log.info("Create a mempool tx that will be evicted") us0 = utxos.pop() inputs = [{"txid": us0["txid"], "vout": us0["vout"]}] outputs = {self.nodes[0].getnewaddress(): 100} tx = self.nodes[0].createrawtransaction(inputs, outputs) # specifically fund this tx with low fee self.nodes[0].settxfee(relayfee) txF = self.nodes[0].fundrawtransaction(tx) # return to automatic fee selection self.nodes[0].settxfee(0) - txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex']) - txid = self.nodes[0].sendrawtransaction(txFS['hex']) + txFS = self.nodes[0].signrawtransactionwithwallet(txF["hex"]) + txid = self.nodes[0].sendrawtransaction(txFS["hex"]) for i in range(utxo_groups): txids.append([]) txids[i] = send_big_transactions( - self.nodes[0], utxos[30 * i:30 * i + 30], 30, 10 * (i + 1)) + self.nodes[0], utxos[30 * i : 30 * i + 30], 30, 10 * (i + 1) + ) - self.log.info('The tx should be evicted by now') + self.log.info("The tx should be evicted by now") assert txid not in self.nodes[0].getrawmempool() txdata = 
self.nodes[0].gettransaction(txid) # confirmation should still be 0 - assert txdata['confirmations'] == 0 + assert txdata["confirmations"] == 0 - self.log.info('Check that mempoolminfee is larger than minrelytxfee') - assert_equal(self.nodes[0].getmempoolinfo()[ - 'minrelaytxfee'], Decimal('10.00')) - assert_greater_than(self.nodes[0].getmempoolinfo()[ - 'mempoolminfee'], Decimal('10.00')) + self.log.info("Check that mempoolminfee is larger than minrelytxfee") + assert_equal(self.nodes[0].getmempoolinfo()["minrelaytxfee"], Decimal("10.00")) + assert_greater_than( + self.nodes[0].getmempoolinfo()["mempoolminfee"], Decimal("10.00") + ) - self.log.info('Create a mempool tx that will not pass mempoolminfee') + self.log.info("Create a mempool tx that will not pass mempoolminfee") us0 = utxos.pop() inputs = [{"txid": us0["txid"], "vout": us0["vout"]}] outputs = {self.nodes[0].getnewaddress(): 100} tx = self.nodes[0].createrawtransaction(inputs, outputs) # specifically fund this tx with a fee < mempoolminfee, >= than # minrelaytxfee - txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee}) - txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex']) - assert_raises_rpc_error(-26, "mempool min fee not met", - self.nodes[0].sendrawtransaction, txFS['hex']) + txF = self.nodes[0].fundrawtransaction(tx, {"feeRate": relayfee}) + txFS = self.nodes[0].signrawtransactionwithwallet(txF["hex"]) + assert_raises_rpc_error( + -26, + "mempool min fee not met", + self.nodes[0].sendrawtransaction, + txFS["hex"], + ) -if __name__ == '__main__': +if __name__ == "__main__": MempoolLimitTest().main() diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py index b270db066..48f0e08f8 100755 --- a/test/functional/mempool_package_limits.py +++ b/test/functional/mempool_package_limits.py @@ -1,601 +1,676 @@ #!/usr/bin/env python3 # Copyright (c) 2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test logic for limiting mempool and package ancestors/descendants.""" from decimal import Decimal from test_framework.address import ADDRESS_ECREG_P2SH_OP_TRUE, SCRIPTSIG_OP_TRUE from test_framework.messages import XEC, CTransaction, FromHex, ToHex from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal from test_framework.wallet import ( bulk_transaction, create_child_with_parents, make_chain, ) FAR_IN_THE_FUTURE = 2000000000 class MempoolPackageLimitsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True - self.extra_args = [[ - # The packages mempool limits are no longer applied after wellington - # activation. - f'-wellingtonactivationtime={FAR_IN_THE_FUTURE}', - ]] + self.extra_args = [ + [ + # The packages mempool limits are no longer applied after wellington + # activation. 
+ f"-wellingtonactivationtime={FAR_IN_THE_FUTURE}", + ] + ] def run_test(self): self.log.info("Generate blocks to create UTXOs") node = self.nodes[0] self.privkeys = [node.get_deterministic_priv_key().key] self.address = node.get_deterministic_priv_key().address self.coins = [] # The last 100 coinbase transactions are premature for b in self.generatetoaddress(node, 200, self.address)[:100]: coinbase = node.getblock(blockhash=b, verbosity=2)["tx"][0] - self.coins.append({ - "txid": coinbase["txid"], - "amount": coinbase["vout"][0]["value"], - "scriptPubKey": coinbase["vout"][0]["scriptPubKey"], - }) + self.coins.append( + { + "txid": coinbase["txid"], + "amount": coinbase["vout"][0]["value"], + "scriptPubKey": coinbase["vout"][0]["scriptPubKey"], + } + ) self.test_chain_limits() self.test_desc_count_limits() self.test_desc_count_limits_2() self.test_anc_count_limits() self.test_anc_count_limits_2() self.test_anc_count_limits_bushy() # The node will accept our (nonstandard) extra large OP_RETURN outputs self.restart_node(0, extra_args=["-acceptnonstdtxn=1"]) self.test_anc_size_limits() self.test_desc_size_limits() def test_chain_limits_helper(self, mempool_count, package_count): node = self.nodes[0] assert_equal(0, node.getmempoolinfo()["size"]) first_coin = self.coins.pop() spk = None txid = first_coin["txid"] chain_hex = [] chain_txns = [] value = first_coin["amount"] for i in range(mempool_count + package_count): (tx, txhex, value, spk) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk) + node, self.address, self.privkeys, txid, value, 0, spk + ) txid = tx.get_id() if i < mempool_count: node.sendrawtransaction(txhex) else: chain_hex.append(txhex) chain_txns.append(tx) testres_too_long = node.testmempoolaccept(rawtxs=chain_hex) for txres in testres_too_long: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] is True - for res in node.testmempoolaccept(rawtxs=chain_hex)) + assert all( + res["allowed"] is True for res in node.testmempoolaccept(rawtxs=chain_hex) + ) def test_chain_limits(self): """Create chains from mempool and package transactions that are longer than 50, but only if both in-mempool and in-package transactions are considered together. This checks that both mempool and in-package transactions are taken into account when calculating ancestors/descendant limits. """ self.log.info( - "Check that in-package ancestors count for mempool ancestor limits") + "Check that in-package ancestors count for mempool ancestor limits" + ) self.test_chain_limits_helper(mempool_count=49, package_count=2) self.test_chain_limits_helper(mempool_count=2, package_count=49) self.test_chain_limits_helper(mempool_count=26, package_count=26) def test_desc_count_limits(self): """Create an 'A' shaped package with 49 transactions in the mempool and 2 in the package: M1 ^ ^ M2a M2b . . . . M25a M25b ^ ^ Pa Pb The top ancestor in the package exceeds descendant limits but only if the in-mempool and in-package descendants are all considered together (49 including in-mempool descendants and 51 including both package transactions). 
""" node = self.nodes[0] assert_equal(0, node.getmempoolinfo()["size"]) self.log.info( - "Check that in-mempool and in-package descendants are calculated properly in packages") + "Check that in-mempool and in-package descendants are calculated properly" + " in packages" + ) # Top parent in mempool, M1 first_coin = self.coins.pop() # Deduct reasonable fee and make 2 outputs parent_value = (first_coin["amount"] - Decimal("200.00")) / 2 inputs = [{"txid": first_coin["txid"], "vout": 0}] - outputs = [{self.address: parent_value}, - {ADDRESS_ECREG_P2SH_OP_TRUE: parent_value}] + outputs = [ + {self.address: parent_value}, + {ADDRESS_ECREG_P2SH_OP_TRUE: parent_value}, + ] rawtx = node.createrawtransaction(inputs, outputs) parent_signed = node.signrawtransactionwithkey( - hexstring=rawtx, privkeys=self.privkeys) + hexstring=rawtx, privkeys=self.privkeys + ) assert parent_signed["complete"] parent_tx = FromHex(CTransaction(), parent_signed["hex"]) parent_txid = parent_tx.rehash() node.sendrawtransaction(parent_signed["hex"]) package_hex = [] # Chain A spk = parent_tx.vout[0].scriptPubKey.hex() value = parent_value txid = parent_txid for i in range(25): (tx, txhex, value, spk) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk) + node, self.address, self.privkeys, txid, value, 0, spk + ) txid = tx.get_id() if i < 24: # M2a... M25a node.sendrawtransaction(txhex) else: # Pa package_hex.append(txhex) # Chain B value = parent_value - Decimal("100.00") rawtx_b = node.createrawtransaction( - [{"txid": parent_txid, "vout": 1}], {self.address: value}) + [{"txid": parent_txid, "vout": 1}], {self.address: value} + ) # M2b tx_child_b = FromHex(CTransaction(), rawtx_b) tx_child_b.vin[0].scriptSig = SCRIPTSIG_OP_TRUE pad_tx(tx_child_b) tx_child_b_hex = ToHex(tx_child_b) node.sendrawtransaction(tx_child_b_hex) spk = tx_child_b.vout[0].scriptPubKey.hex() txid = tx_child_b.rehash() for i in range(24): (tx, txhex, value, spk) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk) + node, self.address, self.privkeys, txid, value, 0, spk + ) txid = tx.get_id() if i < 23: # M3b... M25b node.sendrawtransaction(txhex) else: # Pb package_hex.append(txhex) assert_equal(49, node.getmempoolinfo()["size"]) assert_equal(2, len(package_hex)) testres_too_long = node.testmempoolaccept(rawtxs=package_hex) for txres in testres_too_long: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] is True - for res in node.testmempoolaccept(rawtxs=package_hex)) + assert all( + res["allowed"] is True for res in node.testmempoolaccept(rawtxs=package_hex) + ) def test_desc_count_limits_2(self): """Create a Package with 49 transactions in mempool and 2 transactions in package: M1 ^ ^ M2 ^ . ^ . ^ . ^ M49 ^ ^ P1 ^ P2 P1 has M1 as a mempool ancestor, P2 has no in-mempool ancestors, but when combined P2 has M1 as an ancestor and M1 exceeds descendant_limits (48 in-mempool descendants + 2 in-package descendants, a total of 51 including itself). 
""" node = self.nodes[0] package_hex = [] # M1 first_coin_a = self.coins.pop() # Deduct reasonable fee and make 2 outputs - parent_value = (first_coin_a["amount"] - Decimal('200.0')) / 2 + parent_value = (first_coin_a["amount"] - Decimal("200.0")) / 2 inputs = [{"txid": first_coin_a["txid"], "vout": 0}] - outputs = [{self.address: parent_value}, - {ADDRESS_ECREG_P2SH_OP_TRUE: parent_value}] + outputs = [ + {self.address: parent_value}, + {ADDRESS_ECREG_P2SH_OP_TRUE: parent_value}, + ] rawtx = node.createrawtransaction(inputs, outputs) parent_signed = node.signrawtransactionwithkey( - hexstring=rawtx, privkeys=self.privkeys) + hexstring=rawtx, privkeys=self.privkeys + ) assert parent_signed["complete"] parent_tx = FromHex(CTransaction(), parent_signed["hex"]) pad_tx(parent_tx) parent_txid = parent_tx.rehash() node.sendrawtransaction(parent_signed["hex"]) # Chain M2...M49 spk = parent_tx.vout[0].scriptPubKey.hex() value = parent_value txid = parent_txid for _ in range(48): (tx, txhex, value, spk) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk) + node, self.address, self.privkeys, txid, value, 0, spk + ) pad_tx(tx) txid = tx.hash node.sendrawtransaction(txhex) # P1 - value_p1 = parent_value - Decimal('100') + value_p1 = parent_value - Decimal("100") rawtx_p1 = node.createrawtransaction( - [{"txid": parent_txid, "vout": 1}], [{self.address: value_p1}]) + [{"txid": parent_txid, "vout": 1}], [{self.address: value_p1}] + ) tx_child_p1 = FromHex(CTransaction(), rawtx_p1) tx_child_p1.vin[0].scriptSig = SCRIPTSIG_OP_TRUE pad_tx(tx_child_p1) tx_child_p1_hex = tx_child_p1.serialize().hex() package_hex.append(tx_child_p1_hex) tx_child_p1_spk = tx_child_p1.vout[0].scriptPubKey.hex() # P2 - (_, tx_child_p2_hex, _, _) = make_chain(node, self.address, - self.privkeys, tx_child_p1.hash, value_p1, 0, tx_child_p1_spk) + (_, tx_child_p2_hex, _, _) = make_chain( + node, + self.address, + self.privkeys, + tx_child_p1.hash, + value_p1, + 0, + tx_child_p1_spk, + ) package_hex.append(tx_child_p2_hex) assert_equal(49, node.getmempoolinfo()["size"]) assert_equal(2, len(package_hex)) testres = node.testmempoolaccept(rawtxs=package_hex) assert_equal(len(testres), len(package_hex)) for txres in testres: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] - for res in node.testmempoolaccept(rawtxs=package_hex)) + assert all(res["allowed"] for res in node.testmempoolaccept(rawtxs=package_hex)) def test_anc_count_limits(self): """Create a 'V' shaped chain with 49 transactions in the mempool and 3 in the package: M1a ^ M1b M2a ^ . M2b . . . . M25a M24b ^ ^ Pa Pb ^ ^ Pc The lowest descendant, Pc, exceeds ancestor limits, but only if the in-mempool and in-package ancestors are all considered together. 
""" node = self.nodes[0] assert_equal(0, node.getmempoolinfo()["size"]) package_hex = [] parents_tx = [] values = [] scripts = [] self.log.info( "Check that in-mempool and in-package ancestors are calculated " - "properly in packages") + "properly in packages" + ) # Two chains of 26 & 25 transactions for chain_length in [26, 25]: spk = None top_coin = self.coins.pop() txid = top_coin["txid"] value = top_coin["amount"] for i in range(chain_length): (tx, txhex, value, spk) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk) + node, self.address, self.privkeys, txid, value, 0, spk + ) txid = tx.get_id() if i < chain_length - 1: node.sendrawtransaction(txhex) else: # Save the last transaction for the package package_hex.append(txhex) parents_tx.append(tx) scripts.append(spk) values.append(value) # Child Pc child_hex = create_child_with_parents( - node, self.address, self.privkeys, parents_tx, values, scripts) + node, self.address, self.privkeys, parents_tx, values, scripts + ) package_hex.append(child_hex) assert_equal(49, node.getmempoolinfo()["size"]) assert_equal(3, len(package_hex)) testres_too_long = node.testmempoolaccept(rawtxs=package_hex) for txres in testres_too_long: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] is True - for res in node.testmempoolaccept(rawtxs=package_hex)) + assert all( + res["allowed"] is True for res in node.testmempoolaccept(rawtxs=package_hex) + ) def test_anc_count_limits_2(self): """Create a 'Y' shaped chain with 49 transactions in the mempool and 2 in the package: M1a ^ M1b M2a ^ . M2b . . . . M25a M24b ^ ^ Pc ^ Pd The lowest descendant, Pc, exceeds ancestor limits, but only if the in-mempool and in-package ancestors are all considered together. 
""" node = self.nodes[0] assert_equal(0, node.getmempoolinfo()["size"]) parents_tx = [] values = [] scripts = [] self.log.info( - "Check that in-mempool and in-package ancestors are calculated properly in packages") + "Check that in-mempool and in-package ancestors are calculated properly in" + " packages" + ) # Two chains of 25 & 24 transactions for chain_length in [25, 24]: spk = None top_coin = self.coins.pop() txid = top_coin["txid"] value = top_coin["amount"] for i in range(chain_length): (tx, txhex, value, spk) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk) + node, self.address, self.privkeys, txid, value, 0, spk + ) txid = tx.get_id() node.sendrawtransaction(txhex) if i == chain_length - 1: # last 2 transactions will be the parents of Pc parents_tx.append(tx) values.append(value) scripts.append(spk) # Child Pc pc_hex = create_child_with_parents( - node, self.address, self.privkeys, parents_tx, values, scripts) + node, self.address, self.privkeys, parents_tx, values, scripts + ) pc_tx = FromHex(CTransaction(), pc_hex) pc_value = sum(values) - Decimal("100.00") pc_spk = pc_tx.vout[0].scriptPubKey.hex() # Child Pd (_, pd_hex, _, _) = make_chain( - node, self.address, self.privkeys, pc_tx.get_id(), pc_value, 0, pc_spk) + node, self.address, self.privkeys, pc_tx.get_id(), pc_value, 0, pc_spk + ) assert_equal(49, node.getmempoolinfo()["size"]) testres_too_long = node.testmempoolaccept(rawtxs=[pc_hex, pd_hex]) for txres in testres_too_long: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] is True - for res in node.testmempoolaccept(rawtxs=[pc_hex, pd_hex])) + assert all( + res["allowed"] is True + for res in node.testmempoolaccept(rawtxs=[pc_hex, pd_hex]) + ) def test_anc_count_limits_bushy(self): """Create a tree with 45 transactions in the mempool and 6 in the package: M1...M9 M10...M18 M19...M27 M28...M36 M37...M45 ^ ^ ^ ^ ^ (each with 9 parents) P0 P1 P2 P3 P4 ^ ^ ^ ^ ^ (5 parents) PC Where M(9i+1)...M+(9i+9) are the parents of Pi and P0, P1, P2, P3, and P4 are the parents of PC. P0... P4 individually only have 9 parents each, and PC has no in-mempool parents. But combined, PC has 50 in-mempool and in-package parents. """ node = self.nodes[0] assert_equal(0, node.getmempoolinfo()["size"]) package_hex = [] parent_txns = [] parent_values = [] scripts = [] # Make package transactions P0 ... 
P4 for _ in range(5): gp_tx = [] gp_values = [] gp_scripts = [] # Make mempool transactions M(9i+1)...M(9i+9) for _ in range(9): parent_coin = self.coins.pop() value = parent_coin["amount"] txid = parent_coin["txid"] (tx, txhex, value, spk) = make_chain( - node, self.address, self.privkeys, txid, value) + node, self.address, self.privkeys, txid, value + ) gp_tx.append(tx) gp_values.append(value) gp_scripts.append(spk) node.sendrawtransaction(txhex) # Package transaction Pi pi_hex = create_child_with_parents( - node, self.address, self.privkeys, gp_tx, gp_values, gp_scripts) + node, self.address, self.privkeys, gp_tx, gp_values, gp_scripts + ) package_hex.append(pi_hex) pi_tx = FromHex(CTransaction(), pi_hex) parent_txns.append(pi_tx) parent_values.append(Decimal(pi_tx.vout[0].nValue) / XEC) scripts.append(pi_tx.vout[0].scriptPubKey.hex()) # Package transaction PC package_hex.append( - create_child_with_parents(node, self.address, self.privkeys, - parent_txns, parent_values, scripts)) + create_child_with_parents( + node, self.address, self.privkeys, parent_txns, parent_values, scripts + ) + ) assert_equal(45, node.getmempoolinfo()["size"]) assert_equal(6, len(package_hex)) testres = node.testmempoolaccept(rawtxs=package_hex) for txres in testres: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] is True - for res in node.testmempoolaccept(rawtxs=package_hex)) + assert all( + res["allowed"] is True for res in node.testmempoolaccept(rawtxs=package_hex) + ) def test_anc_size_limits(self): """Test Case with 2 independent transactions in the mempool and a parent + child in the package, where the package parent is the child of both mempool transactions (30KB each): A B ^ ^ C ^ D The lowest descendant, D, exceeds ancestor size limits, but only if the in-mempool and in-package ancestors are all considered together. 
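The size variant follows the same shape. Assuming the default ancestor size ceiling of 101 kB (the -limitancestorsize default; the test itself does not spell it out), the 30 kB bulked transactions are sized so that C still fits but D fails only when mempool and package are combined:

ANCESTOR_SIZE_LIMIT = 101_000  # assumed default -limitancestorsize, in bytes
tx_size = 30_000               # each transaction above is bulked to ~30 kB
c_ancestors = 3 * tx_size      # A + B + C = 90 kB: C alone is fine
d_ancestors = 4 * tx_size      # A + B + C + D = 120 kB: D is not
assert c_ancestors <= ANCESTOR_SIZE_LIMIT < d_ancestors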
""" node = self.nodes[0] assert_equal(0, node.getmempoolinfo()["size"]) parents_tx = [] values = [] scripts = [] target_size = 30_000 # 10 sats/B high_fee = Decimal("3000.00") self.log.info( - "Check that in-mempool and in-package ancestor size limits are calculated properly in packages") + "Check that in-mempool and in-package ancestor size limits are calculated" + " properly in packages" + ) # Mempool transactions A and B for _ in range(2): spk = None top_coin = self.coins.pop() txid = top_coin["txid"] value = top_coin["amount"] (tx, _, _, _) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk, high_fee) + node, self.address, self.privkeys, txid, value, 0, spk, high_fee + ) bulked_tx = bulk_transaction(tx, node, target_size, self.privkeys) node.sendrawtransaction(ToHex(bulked_tx)) parents_tx.append(bulked_tx) values.append(Decimal(bulked_tx.vout[0].nValue) / XEC) scripts.append(bulked_tx.vout[0].scriptPubKey.hex()) # Package transaction C small_pc_hex = create_child_with_parents( - node, self.address, self.privkeys, parents_tx, values, scripts, high_fee) + node, self.address, self.privkeys, parents_tx, values, scripts, high_fee + ) pc_tx = bulk_transaction( - FromHex(CTransaction(), small_pc_hex), node, target_size, self.privkeys) + FromHex(CTransaction(), small_pc_hex), node, target_size, self.privkeys + ) pc_value = Decimal(pc_tx.vout[0].nValue) / XEC pc_spk = pc_tx.vout[0].scriptPubKey.hex() pc_hex = ToHex(pc_tx) # Package transaction D (small_pd, _, val, spk) = make_chain( - node, self.address, self.privkeys, pc_tx.rehash(), pc_value, 0, pc_spk, high_fee) - prevtxs = [{ - "txid": pc_tx.get_id(), - "vout": 0, - "scriptPubKey": spk, - "amount": pc_value, - }] - pd_tx = bulk_transaction( - small_pd, node, target_size, self.privkeys, prevtxs) + node, + self.address, + self.privkeys, + pc_tx.rehash(), + pc_value, + 0, + pc_spk, + high_fee, + ) + prevtxs = [ + { + "txid": pc_tx.get_id(), + "vout": 0, + "scriptPubKey": spk, + "amount": pc_value, + } + ] + pd_tx = bulk_transaction(small_pd, node, target_size, self.privkeys, prevtxs) pd_hex = ToHex(pd_tx) assert_equal(2, node.getmempoolinfo()["size"]) testres_too_heavy = node.testmempoolaccept(rawtxs=[pc_hex, pd_hex]) for txres in testres_too_heavy: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] is True - for res in node.testmempoolaccept(rawtxs=[pc_hex, pd_hex])) + assert all( + res["allowed"] is True + for res in node.testmempoolaccept(rawtxs=[pc_hex, pd_hex]) + ) def test_desc_size_limits(self): """Create 3 mempool transactions and 2 package transactions (25KB each): Ma ^ ^ Mb Mc ^ ^ Pd Pe The top ancestor in the package exceeds descendant size limits but only if the in-mempool and in-package descendants are all considered together. 
""" node = self.nodes[0] assert_equal(0, node.getmempoolinfo()["size"]) target_size = 21_000 # 10 sats/vB high_fee = Decimal("2100.00") self.log.info( - "Check that in-mempool and in-package descendant sizes are calculated properly in packages") + "Check that in-mempool and in-package descendant sizes are calculated" + " properly in packages" + ) # Top parent in mempool, Ma first_coin = self.coins.pop() # Deduct fee and make 2 outputs parent_value = (first_coin["amount"] - high_fee) / 2 inputs = [{"txid": first_coin["txid"], "vout": 0}] - outputs = [{self.address: parent_value}, - {ADDRESS_ECREG_P2SH_OP_TRUE: parent_value}] + outputs = [ + {self.address: parent_value}, + {ADDRESS_ECREG_P2SH_OP_TRUE: parent_value}, + ] rawtx = node.createrawtransaction(inputs, outputs) parent_tx = bulk_transaction( - FromHex(CTransaction(), rawtx), node, target_size, self.privkeys) + FromHex(CTransaction(), rawtx), node, target_size, self.privkeys + ) node.sendrawtransaction(ToHex(parent_tx)) package_hex = [] # Two legs (left and right) for j in range(2): # Mempool transaction (Mb and Mc) spk = parent_tx.vout[j].scriptPubKey.hex() value = Decimal(parent_tx.vout[j].nValue) / XEC txid = parent_tx.get_id() - prevtxs = [{ - "txid": txid, - "vout": j, - "scriptPubKey": spk, - "amount": value, - }] + prevtxs = [ + { + "txid": txid, + "vout": j, + "scriptPubKey": spk, + "amount": value, + } + ] if j == 0: # normal key (tx_small, _, _, _) = make_chain( - node, self.address, self.privkeys, txid, value, j, spk, high_fee) + node, self.address, self.privkeys, txid, value, j, spk, high_fee + ) mempool_tx = bulk_transaction( - tx_small, node, target_size, self.privkeys, prevtxs) + tx_small, node, target_size, self.privkeys, prevtxs + ) else: # OP_TRUE inputs = [{"txid": txid, "vout": 1}] outputs = {self.address: value - high_fee} small_tx = FromHex( - CTransaction(), node.createrawtransaction(inputs, outputs)) + CTransaction(), node.createrawtransaction(inputs, outputs) + ) mempool_tx = bulk_transaction( - small_tx, node, target_size, None, prevtxs) + small_tx, node, target_size, None, prevtxs + ) node.sendrawtransaction(ToHex(mempool_tx)) # Package transaction (Pd and Pe) spk = mempool_tx.vout[0].scriptPubKey.hex() value = Decimal(mempool_tx.vout[0].nValue) / XEC txid = mempool_tx.get_id() (tx_small, _, _, _) = make_chain( - node, self.address, self.privkeys, txid, value, 0, spk, high_fee) - prevtxs = [{ - "txid": txid, - "vout": 0, - "scriptPubKey": spk, - "amount": value, - }] + node, self.address, self.privkeys, txid, value, 0, spk, high_fee + ) + prevtxs = [ + { + "txid": txid, + "vout": 0, + "scriptPubKey": spk, + "amount": value, + } + ] package_tx = bulk_transaction( - tx_small, node, target_size, self.privkeys, prevtxs) + tx_small, node, target_size, self.privkeys, prevtxs + ) package_hex.append(ToHex(package_tx)) assert_equal(3, node.getmempoolinfo()["size"]) assert_equal(2, len(package_hex)) testres_too_heavy = node.testmempoolaccept(rawtxs=package_hex) for txres in testres_too_heavy: assert_equal(txres["package-error"], "package-mempool-limits") # Clear mempool and check that the package passes now self.generate(node, 1) - assert all(res["allowed"] is True - for res in node.testmempoolaccept(rawtxs=package_hex)) + assert all( + res["allowed"] is True for res in node.testmempoolaccept(rawtxs=package_hex) + ) if __name__ == "__main__": MempoolPackageLimitsTest().main() diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index 43d85c3b6..10ff68ef4 100755 --- 
a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -1,369 +1,393 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test descendant package tracking code.""" from decimal import Decimal from test_framework.p2p import P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round # default limits MAX_ANCESTORS = 50 MAX_DESCENDANTS = 50 # custom limits for node1 MAX_ANCESTORS_CUSTOM = 5 FAR_IN_THE_FUTURE = 2000000000 class MempoolPackagesTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 common_params = [ "-maxorphantx=1000", "-deprecatedrpc=mempool_ancestors_descendants", # This test tests mempool ancestor chain limits, which are no longer # enforced after wellington, so we need to force wellington to # activate in the distant future f"-wellingtonactivationtime={FAR_IN_THE_FUTURE}", ] self.extra_args = [ - common_params, common_params + - [f"-limitancestorcount={MAX_ANCESTORS_CUSTOM}"]] + common_params, + common_params + [f"-limitancestorcount={MAX_ANCESTORS_CUSTOM}"], + ] def skip_test_if_missing_module(self): self.skip_if_no_wallet() # Build a transaction that spends parent_txid:vout # Return amount sent - def chain_transaction(self, node, parent_txid, vout, - value, fee, num_outputs): + def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) - inputs = [{'txid': parent_txid, 'vout': vout}] + inputs = [{"txid": parent_txid, "vout": vout}] outputs = {} for _ in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransactionwithwallet(rawtx) - txid = node.sendrawtransaction(signedtx['hex']) + txid = node.sendrawtransaction(signedtx["hex"]) fulltx = node.getrawtransaction(txid, 1) # make sure we didn't generate a change output - assert len(fulltx['vout']) == num_outputs + assert len(fulltx["vout"]) == num_outputs return (txid, send_value) def run_test(self): # Mine some blocks and have them mature. 
# keep track of invs peer_inv_store = self.nodes[0].add_p2p_connection(P2PTxInvStore()) self.generate(self.nodes[0], 101) utxo = self.nodes[0].listunspent(10) - txid = utxo[0]['txid'] - vout = utxo[0]['vout'] - value = utxo[0]['amount'] - assert 'ancestorcount' not in utxo[0] - assert 'ancestorsize' not in utxo[0] - assert 'ancestorfees' not in utxo[0] + txid = utxo[0]["txid"] + vout = utxo[0]["vout"] + value = utxo[0]["amount"] + assert "ancestorcount" not in utxo[0] + assert "ancestorsize" not in utxo[0] + assert "ancestorfees" not in utxo[0] fee = Decimal("100") # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] ancestor_size = 0 ancestor_fees = Decimal(0) for i in range(MAX_ANCESTORS): (txid, sent_value) = self.chain_transaction( - self.nodes[0], txid, 0, value, fee, 1) + self.nodes[0], txid, 0, value, fee, 1 + ) value = sent_value chain.append(txid) # Check that listunspent ancestor{count, size, fees} yield the # correct results wallet_unspent = self.nodes[0].listunspent(minconf=0) this_unspent = next( - utxo_info for utxo_info in wallet_unspent if utxo_info['txid'] == txid) - assert_equal(this_unspent['ancestorcount'], i + 1) - ancestor_size += self.nodes[0].getrawtransaction( - txid=txid, verbose=True)['size'] - assert_equal(this_unspent['ancestorsize'], ancestor_size) - ancestor_fees -= self.nodes[0].gettransaction(txid=txid)['fee'] - assert_equal(this_unspent['ancestorfees'], ancestor_fees) + utxo_info for utxo_info in wallet_unspent if utxo_info["txid"] == txid + ) + assert_equal(this_unspent["ancestorcount"], i + 1) + ancestor_size += self.nodes[0].getrawtransaction(txid=txid, verbose=True)[ + "size" + ] + assert_equal(this_unspent["ancestorsize"], ancestor_size) + ancestor_fees -= self.nodes[0].gettransaction(txid=txid)["fee"] + assert_equal(this_unspent["ancestorfees"], ancestor_fees) # Wait until mempool transactions have passed initial broadcast # (sent inv and received getdata) # Otherwise, getrawmempool may be inconsistent with getmempoolentry if # unbroadcast changes in between peer_inv_store.wait_for_broadcast(chain) # Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor # count and fees should look correct mempool = self.nodes[0].getrawmempool(True) assert_equal(len(mempool), MAX_ANCESTORS) descendant_count = 1 descendant_fees = 0 descendant_size = 0 - assert_equal(ancestor_size, - sum([mempool[tx]['size'] for tx in mempool])) + assert_equal(ancestor_size, sum([mempool[tx]["size"] for tx in mempool])) ancestor_count = MAX_ANCESTORS - assert_equal(ancestor_fees, - sum([mempool[tx]['fees']['base'] for tx in mempool])) + assert_equal( + ancestor_fees, sum([mempool[tx]["fees"]["base"] for tx in mempool]) + ) descendants = [] ancestors = list(chain) for x in reversed(chain): # Check that getmempoolentry is consistent with getrawmempool entry = self.nodes[0].getmempoolentry(x) assert_equal(entry, mempool[x]) # Check that the descendant calculations are correct - assert_equal(mempool[x]['descendantcount'], descendant_count) - descendant_fees += mempool[x]['fees']['base'] - assert_equal( - mempool[x]['fees']['modified'], - mempool[x]['fees']['base']) - assert_equal(mempool[x]['fees']['descendant'], descendant_fees) - descendant_size += mempool[x]['size'] - assert_equal(mempool[x]['descendantsize'], descendant_size) + assert_equal(mempool[x]["descendantcount"], descendant_count) + descendant_fees += mempool[x]["fees"]["base"] + assert_equal(mempool[x]["fees"]["modified"], mempool[x]["fees"]["base"]) + 
assert_equal(mempool[x]["fees"]["descendant"], descendant_fees) + descendant_size += mempool[x]["size"] + assert_equal(mempool[x]["descendantsize"], descendant_size) descendant_count += 1 # Check that ancestor calculations are correct - assert_equal(mempool[x]['ancestorcount'], ancestor_count) - assert_equal(mempool[x]['fees']['ancestor'], ancestor_fees) - assert_equal(mempool[x]['ancestorsize'], ancestor_size) - ancestor_size -= mempool[x]['size'] - ancestor_fees -= mempool[x]['fees']['base'] + assert_equal(mempool[x]["ancestorcount"], ancestor_count) + assert_equal(mempool[x]["fees"]["ancestor"], ancestor_fees) + assert_equal(mempool[x]["ancestorsize"], ancestor_size) + ancestor_size -= mempool[x]["size"] + ancestor_fees -= mempool[x]["fees"]["base"] ancestor_count -= 1 # Check that parent/child list is correct - assert_equal(mempool[x]['spentby'], descendants[-1:]) - assert_equal(mempool[x]['depends'], ancestors[-2:-1]) + assert_equal(mempool[x]["spentby"], descendants[-1:]) + assert_equal(mempool[x]["depends"], ancestors[-2:-1]) # Check that getmempooldescendants is correct - assert_equal(sorted(descendants), sorted( - self.nodes[0].getmempooldescendants(x))) + assert_equal( + sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)) + ) # Check getmempooldescendants verbose output is correct - for descendant, dinfo in self.nodes[0].getmempooldescendants( - x, True).items(): - assert_equal(dinfo['depends'], [ - chain[chain.index(descendant) - 1]]) - if dinfo['descendantcount'] > 1: - assert_equal(dinfo['spentby'], [ - chain[chain.index(descendant) + 1]]) + for descendant, dinfo in ( + self.nodes[0].getmempooldescendants(x, True).items() + ): + assert_equal(dinfo["depends"], [chain[chain.index(descendant) - 1]]) + if dinfo["descendantcount"] > 1: + assert_equal(dinfo["spentby"], [chain[chain.index(descendant) + 1]]) else: - assert_equal(dinfo['spentby'], []) + assert_equal(dinfo["spentby"], []) descendants.append(x) # Check that getmempoolancestors is correct ancestors.remove(x) - assert_equal(sorted(ancestors), sorted( - self.nodes[0].getmempoolancestors(x))) + assert_equal( + sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)) + ) # Check that getmempoolancestors verbose output is correct - for ancestor, ainfo in self.nodes[0].getmempoolancestors( - x, True).items(): - assert_equal(ainfo['spentby'], [ - chain[chain.index(ancestor) + 1]]) - if ainfo['ancestorcount'] > 1: - assert_equal(ainfo['depends'], [ - chain[chain.index(ancestor) - 1]]) + for ancestor, ainfo in self.nodes[0].getmempoolancestors(x, True).items(): + assert_equal(ainfo["spentby"], [chain[chain.index(ancestor) + 1]]) + if ainfo["ancestorcount"] > 1: + assert_equal(ainfo["depends"], [chain[chain.index(ancestor) - 1]]) else: - assert_equal(ainfo['depends'], []) + assert_equal(ainfo["depends"], []) # Check that getmempoolancestors/getmempooldescendants correctly handle # verbose=true v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True) assert_equal(len(v_ancestors), len(chain) - 1) for x in v_ancestors.keys(): assert_equal(mempool[x], v_ancestors[x]) assert chain[-1] not in v_ancestors.keys() v_descendants = self.nodes[0].getmempooldescendants(chain[0], True) assert_equal(len(v_descendants), len(chain) - 1) for x in v_descendants.keys(): assert_equal(mempool[x], v_descendants[x]) assert chain[0] not in v_descendants.keys() # Check that ancestor modified fees includes fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000) mempool = 
self.nodes[0].getrawmempool(True) ancestor_fees = 0 for x in chain: - ancestor_fees += mempool[x]['fees']['base'] - assert_equal(mempool[x]['fees']['ancestor'], - ancestor_fees + Decimal('10.00')) + ancestor_fees += mempool[x]["fees"]["base"] + assert_equal( + mempool[x]["fees"]["ancestor"], ancestor_fees + Decimal("10.00") + ) # Undo the prioritisetransaction for later tests self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000) # Check that descendant modified fees includes fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000) mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): - descendant_fees += mempool[x]['fees']['base'] - assert_equal(mempool[x]['fees']['descendant'], - descendant_fees + Decimal('10.00')) + descendant_fees += mempool[x]["fees"]["base"] + assert_equal( + mempool[x]["fees"]["descendant"], descendant_fees + Decimal("10.00") + ) # Adding one more transaction on to the chain should fail. - assert_raises_rpc_error(-26, "too-long-mempool-chain", - self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1) + assert_raises_rpc_error( + -26, + "too-long-mempool-chain", + self.chain_transaction, + self.nodes[0], + txid, + vout, + value, + fee, + 1, + ) # Check that prioritising a tx before it's added to the mempool works # First clear the mempool by mining a block. self.generate(self.nodes[0], 1) assert_equal(len(self.nodes[0].getrawmempool()), 0) # Prioritise a transaction that has been mined, then add it back to the # mempool by using invalidateblock. self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Keep node1's tip synced with node0 self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) # Now check that the transaction is in the mempool, with the right # modified fee mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): - descendant_fees += mempool[x]['fees']['base'] - if (x == chain[-1]): - assert_equal(mempool[x]['fees']['modified'], - mempool[x]['fees']['base'] + satoshi_round(20.00)) - assert_equal(mempool[x]['fees']['descendant'], - descendant_fees + satoshi_round(20.00)) + descendant_fees += mempool[x]["fees"]["base"] + if x == chain[-1]: + assert_equal( + mempool[x]["fees"]["modified"], + mempool[x]["fees"]["base"] + satoshi_round(20.00), + ) + assert_equal( + mempool[x]["fees"]["descendant"], descendant_fees + satoshi_round(20.00) + ) # Check that node1's mempool is as expected (-> custom ancestor limit) mempool0 = self.nodes[0].getrawmempool(False) mempool1 = self.nodes[1].getrawmempool(False) assert_equal(len(mempool1), MAX_ANCESTORS_CUSTOM) assert set(mempool1).issubset(set(mempool0)) for tx in chain[:MAX_ANCESTORS_CUSTOM]: assert tx in mempool1 # TODO: more detailed check of node1's mempool (fees etc.) 
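A unit note on the prioritisetransaction calls above: fee_delta is denominated in satoshis, while the mempool's fee fields are reported in XEC, which is why deltas of 1000 and 2000 surface as 10.00 and 20.00 in the checks above:

from decimal import Decimal

SATS_PER_XEC = 100
assert Decimal(1000) / SATS_PER_XEC == Decimal("10.00")  # fee_delta=1000
assert Decimal(2000) / SATS_PER_XEC == Decimal("20.00")  # fee_delta=2000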
# check transaction unbroadcast info (should be false if in both # mempools) mempool = self.nodes[0].getrawmempool(True) for tx in mempool: - assert_equal(mempool[tx]['unbroadcast'], False) + assert_equal(mempool[tx]["unbroadcast"], False) # TODO: test ancestor size limits # Now test descendant chain limits - txid = utxo[1]['txid'] - value = utxo[1]['amount'] - vout = utxo[1]['vout'] + txid = utxo[1]["txid"] + value = utxo[1]["amount"] + vout = utxo[1]["vout"] transaction_package = [] tx_children = [] # First create one parent tx with 10 children (txid, sent_value) = self.chain_transaction( - self.nodes[0], txid, vout, value, fee, 10) + self.nodes[0], txid, vout, value, fee, 10 + ) parent_transaction = txid for i in range(10): - transaction_package.append( - {'txid': txid, 'vout': i, 'amount': sent_value}) + transaction_package.append({"txid": txid, "vout": i, "amount": sent_value}) # Sign and send up to MAX_DESCENDANT transactions chained off the # parent tx for _ in range(MAX_DESCENDANTS - 1): utxo = transaction_package.pop(0) (txid, sent_value) = self.chain_transaction( - self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) - if utxo['txid'] is parent_transaction: + self.nodes[0], utxo["txid"], utxo["vout"], utxo["amount"], fee, 10 + ) + if utxo["txid"] is parent_transaction: tx_children.append(txid) for j in range(10): transaction_package.append( - {'txid': txid, 'vout': j, 'amount': sent_value}) + {"txid": txid, "vout": j, "amount": sent_value} + ) mempool = self.nodes[0].getrawmempool(True) - assert_equal(mempool[parent_transaction] - ['descendantcount'], MAX_DESCENDANTS) - assert_equal(sorted(mempool[parent_transaction] - ['spentby']), sorted(tx_children)) + assert_equal(mempool[parent_transaction]["descendantcount"], MAX_DESCENDANTS) + assert_equal( + sorted(mempool[parent_transaction]["spentby"]), sorted(tx_children) + ) for child in tx_children: - assert_equal(mempool[child]['depends'], [parent_transaction]) + assert_equal(mempool[child]["depends"], [parent_transaction]) # Sending one more chained transaction will fail utxo = transaction_package.pop(0) - assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, - self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) + assert_raises_rpc_error( + -26, + "too-long-mempool-chain", + self.chain_transaction, + self.nodes[0], + utxo["txid"], + utxo["vout"], + utxo["amount"], + fee, + 10, + ) # TODO: check that node1's mempool is as expected # TODO: test descendant size limits # Test reorg handling # First, the basics: self.generate(self.nodes[0], 1) self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash()) self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash()) # Now test the case where node1 has a transaction T in its mempool that # depends on transactions A and B which are in a mined block, and the # block containing A and B is disconnected, AND B is not accepted back # into node1's mempool because its ancestor count is too high. # Create 8 transactions, like so: # Tx0 -> Tx1 (vout0) # \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7 # # Mine them in the next block, then generate a new tx8 that spends # Tx1 and Tx7, and add to node1's mempool, then disconnect the # last block. 
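The depth arithmetic behind this scenario: with node1 capped at 5 ancestors (counted including the transaction itself), the lower branch is too deep to re-enter its mempool after the disconnect, which is what strands tx8, the spender of Tx1 and Tx7:

MAX_ANCESTORS_CUSTOM = 5  # node1's -limitancestorcount
long_branch = 1 + 6       # Tx0 plus the Tx2 -> Tx3 -> ... -> Tx7 chain
assert long_branch > MAX_ANCESTORS_CUSTOM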
# Create tx0 with 2 outputs utxo = self.nodes[0].listunspent() - txid = utxo[0]['txid'] - value = utxo[0]['amount'] - vout = utxo[0]['vout'] + txid = utxo[0]["txid"] + value = utxo[0]["amount"] + vout = utxo[0]["vout"] send_value = satoshi_round((value - fee) / 2) - inputs = [{'txid': txid, 'vout': vout}] + inputs = [{"txid": txid, "vout": vout}] outputs = {} for _ in range(2): outputs[self.nodes[0].getnewaddress()] = send_value rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) - txid = self.nodes[0].sendrawtransaction(signedtx['hex']) + txid = self.nodes[0].sendrawtransaction(signedtx["hex"]) tx0_id = txid value = send_value # Create tx1 - tx1_id, _ = self.chain_transaction( - self.nodes[0], tx0_id, 0, value, fee, 1) + tx1_id, _ = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1) # Create tx2-7 vout = 1 txid = tx0_id for _ in range(6): (txid, sent_value) = self.chain_transaction( - self.nodes[0], txid, vout, value, fee, 1) + self.nodes[0], txid, vout, value, fee, 1 + ) vout = 0 value = sent_value # Mine these in a block self.generate(self.nodes[0], 1) # Now generate tx8, with a big fee - inputs = [{'txid': tx1_id, 'vout': 0}, {'txid': txid, 'vout': 0}] + inputs = [{"txid": tx1_id, "vout": 0}, {"txid": txid, "vout": 0}] outputs = {self.nodes[0].getnewaddress(): send_value + value - 4 * fee} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) - txid = self.nodes[0].sendrawtransaction(signedtx['hex']) + txid = self.nodes[0].sendrawtransaction(signedtx["hex"]) self.sync_mempools() # Now try to disconnect the tip on each node... self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.sync_blocks() -if __name__ == '__main__': +if __name__ == "__main__": MempoolPackagesTest().main() diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index 7f027d1c8..41b3d2d0c 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -1,213 +1,225 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool persistence. By default, bitcoind will dump mempool on shutdown and then reload it on startup. This can be overridden with the -persistmempool=0 command line option. Test is as follows: - start node0, node1 and node2. node1 has -persistmempool=0 - create 5 transactions on node2 to its own address. Note that these are not sent to node0 or node1 addresses because we don't want them to be saved in the wallet. - check that node0 and node1 have 5 transactions in their mempools - shutdown all nodes. - startup node0. Verify that it still has 5 transactions in its mempool. Shutdown node0. This tests that by default the mempool is persistent. - startup node1. Verify that its mempool is empty. Shutdown node1. This tests that with -persistmempool=0, the mempool is not dumped to disk when the node is shut down. - Restart node0 with -persistmempool=0. Verify that its mempool is empty. Shutdown node0. This tests that with -persistmempool=0, the mempool is not loaded from disk on start up. - Restart node0 with -persistmempool. Verify that it has 5 transactions in its mempool. 
This tests that -persistmempool=0 does not overwrite a previously valid mempool stored on disk. - Remove node0 mempool.dat and verify savemempool RPC recreates it and verify that node1 can load it and has 5 transactions in its mempool. - Verify that savemempool throws when the RPC is called if node1 can't write to disk. """ import os import time from decimal import Decimal from test_framework.p2p import P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than_or_equal, assert_raises_rpc_error, ) class MempoolPersistTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 3 self.extra_args = [[], ["-persistmempool=0"], []] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.log.debug("Send 5 transactions from node2 (to its own address)") tx_creation_time_lower = int(time.time()) for _ in range(5): last_txid = self.nodes[2].sendtoaddress( - self.nodes[2].getnewaddress(), Decimal("10")) + self.nodes[2].getnewaddress(), Decimal("10") + ) node2_balance = self.nodes[2].getbalance() self.sync_all() tx_creation_time_higher = int(time.time()) self.log.debug( - "Verify that node0 and node1 have 5 transactions in their mempools") + "Verify that node0 and node1 have 5 transactions in their mempools" + ) assert_equal(len(self.nodes[0].getrawmempool()), 5) assert_equal(len(self.nodes[1].getrawmempool()), 5) - total_fee_old = self.nodes[0].getmempoolinfo()['total_fee'] + total_fee_old = self.nodes[0].getmempoolinfo()["total_fee"] self.log.debug("Prioritize a transaction on node0") - fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] - assert_equal(fees['base'], fees['modified']) + fees = self.nodes[0].getmempoolentry(txid=last_txid)["fees"] + assert_equal(fees["base"], fees["modified"]) self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000) - fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] - assert_equal(fees['base'] + Decimal('10.0'), fees['modified']) + fees = self.nodes[0].getmempoolentry(txid=last_txid)["fees"] + assert_equal(fees["base"] + Decimal("10.0"), fees["modified"]) self.log.info( - 'Check the total base fee is unchanged after prioritisetransaction') + "Check the total base fee is unchanged after prioritisetransaction" + ) + assert_equal(total_fee_old, self.nodes[0].getmempoolinfo()["total_fee"]) assert_equal( total_fee_old, - self.nodes[0].getmempoolinfo()['total_fee']) - assert_equal(total_fee_old, - sum(v['fees']['base'] for k, v - in self.nodes[0].getrawmempool(verbose=True).items())) + sum( + v["fees"]["base"] + for k, v in self.nodes[0].getrawmempool(verbose=True).items() + ), + ) - tx_creation_time = self.nodes[0].getmempoolentry(txid=last_txid)[ - 'time'] + tx_creation_time = self.nodes[0].getmempoolentry(txid=last_txid)["time"] assert_greater_than_or_equal(tx_creation_time, tx_creation_time_lower) assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time) # disconnect nodes & make a txn that remains in the unbroadcast set. self.disconnect_nodes(0, 1) assert len(self.nodes[0].getpeerinfo()) == 0 assert len(self.nodes[0].p2ps) == 0 - self.nodes[0].sendtoaddress( - self.nodes[2].getnewaddress(), Decimal("12000000")) + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("12000000")) self.connect_nodes(0, 2) - self.log.debug("Stop-start the nodes. Verify that node0 has the " - "transactions in its mempool and node1 does not. 
" - "Verify that node2 calculates its balance correctly " - "after loading wallet transactions.") + self.log.debug( + "Stop-start the nodes. Verify that node0 has the " + "transactions in its mempool and node1 does not. " + "Verify that node2 calculates its balance correctly " + "after loading wallet transactions." + ) self.stop_nodes() # Give this one a head-start, so we can be "extra-sure" that it didn't # load anything later # Also don't store the mempool, to keep the datadir clean self.start_node(1, extra_args=["-persistmempool=0"]) self.start_node(0) self.start_node(2) # start_node is blocking on the mempool being loaded assert self.nodes[0].getmempoolinfo()["loaded"] assert self.nodes[2].getmempoolinfo()["loaded"] assert_equal(len(self.nodes[0].getrawmempool()), 6) assert_equal(len(self.nodes[2].getrawmempool()), 5) # The others have loaded their mempool. If node_1 loaded anything, we'd # probably notice by now: assert_equal(len(self.nodes[1].getrawmempool()), 0) - self.log.debug('Verify prioritization is loaded correctly') - fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] - assert_equal(fees['base'] + Decimal('10.00'), fees['modified']) + self.log.debug("Verify prioritization is loaded correctly") + fees = self.nodes[0].getmempoolentry(txid=last_txid)["fees"] + assert_equal(fees["base"] + Decimal("10.00"), fees["modified"]) - self.log.debug('Verify time is loaded correctly') + self.log.debug("Verify time is loaded correctly") assert_equal( - tx_creation_time, - self.nodes[0].getmempoolentry( - txid=last_txid)['time']) + tx_creation_time, self.nodes[0].getmempoolentry(txid=last_txid)["time"] + ) # Verify accounting of mempool transactions after restart is correct # Flush mempool to wallet self.nodes[2].syncwithvalidationinterfacequeue() assert_equal(node2_balance, self.nodes[2].getbalance()) # start node0 with wallet disabled so wallet transactions don't get # resubmitted self.log.debug( - "Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.") + "Stop-start node0 with -persistmempool=0. Verify that it doesn't load its" + " mempool.dat file." + ) self.stop_nodes() self.start_node(0, extra_args=["-persistmempool=0", "-disablewallet"]) assert self.nodes[0].getmempoolinfo()["loaded"] assert_equal(len(self.nodes[0].getrawmempool()), 0) self.log.debug( - "Stop-start node0. Verify that it has the transactions in its mempool.") + "Stop-start node0. Verify that it has the transactions in its mempool." + ) self.stop_nodes() self.start_node(0) assert self.nodes[0].getmempoolinfo()["loaded"] assert_equal(len(self.nodes[0].getrawmempool()), 6) - mempooldat0 = os.path.join( - self.nodes[0].datadir, self.chain, 'mempool.dat') - mempooldat1 = os.path.join( - self.nodes[1].datadir, self.chain, 'mempool.dat') + mempooldat0 = os.path.join(self.nodes[0].datadir, self.chain, "mempool.dat") + mempooldat1 = os.path.join(self.nodes[1].datadir, self.chain, "mempool.dat") self.log.debug( - "Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it") + "Remove the mempool.dat file. Verify that savemempool to disk via RPC" + " re-creates it" + ) os.remove(mempooldat0) result0 = self.nodes[0].savemempool() assert os.path.isfile(mempooldat0) - assert_equal(result0['filename'], mempooldat0) + assert_equal(result0["filename"], mempooldat0) self.log.debug( - "Stop nodes, make node1 use mempool.dat from node0. Verify it has 6 transactions") + "Stop nodes, make node1 use mempool.dat from node0. 
Verify it has 6" + " transactions" + ) os.rename(mempooldat0, mempooldat1) self.stop_nodes() self.start_node(1, extra_args=["-persistmempool"]) assert self.nodes[1].getmempoolinfo()["loaded"] assert_equal(len(self.nodes[1].getrawmempool()), 6) self.log.debug( - "Prevent bitcoind from writing mempool.dat to disk. Verify that `savemempool` fails") + "Prevent bitcoind from writing mempool.dat to disk. Verify that" + " `savemempool` fails" + ) # to test the exception we are creating a tmp folder called mempool.dat.new # which is an implementation detail that could change and break this # test mempooldotnew1 = f"{mempooldat1}.new" os.mkdir(mempooldotnew1) - assert_raises_rpc_error(-1, "Unable to dump mempool to disk", - self.nodes[1].savemempool) + assert_raises_rpc_error( + -1, "Unable to dump mempool to disk", self.nodes[1].savemempool + ) os.rmdir(mempooldotnew1) self.test_persist_unbroadcast() def test_persist_unbroadcast(self): node0 = self.nodes[0] self.start_node(0) # clear out mempool self.generate(node0, 1, sync_fun=self.no_op) # ensure node0 doesn't have any connections # make a transaction that will remain in the unbroadcast set assert len(node0.getpeerinfo()) == 0 assert len(node0.p2ps) == 0 node0.sendtoaddress(self.nodes[1].getnewaddress(), Decimal("12")) # shutdown, then startup with wallet disabled self.stop_nodes() self.start_node(0, extra_args=["-disablewallet"]) # check that txn gets broadcast due to unbroadcast logic conn = node0.add_p2p_connection(P2PTxInvStore()) # 15 min + 1 for buffer node0.mockscheduler(16 * 60) self.wait_until(lambda: len(conn.get_invs()) == 1) -if __name__ == '__main__': +if __name__ == "__main__": MempoolPersistTest().main() diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py index 16679f5d0..fbc300e1b 100755 --- a/test/functional/mempool_reorg.py +++ b/test/functional/mempool_reorg.py @@ -1,159 +1,167 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool re-org scenarios. Test re-org scenarios with a mempool that contains transactions that spend (directly or indirectly) coinbase transactions. """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error from test_framework.wallet import MiniWallet class MempoolCoinbaseTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [ # immediate tx relay - ['-whitelist=noban@127.0.0.1', ], - [] + [ + "-whitelist=noban@127.0.0.1", + ], + [], ] def run_test(self): wallet = MiniWallet(self.nodes[0]) # Start with a 200 block chain assert_equal(self.nodes[0].getblockcount(), 200) self.log.info("Add 4 coinbase utxos to the miniwallet") # Block 76 contains the first spendable coinbase txs. first_block = 76 wallet.rescan_utxos() # Three scenarios for re-orging coinbase spends in the memory pool: # 1. Direct coinbase spend : spend_1 # 2. Indirect (coinbase spend in chain, child in mempool) : spend_2 and spend_2_1 # 3. Indirect (coinbase and child both in chain) : spend_3 and spend_3_1 # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase), # and make sure the mempool code behaves correctly. 
- b = [ - self.nodes[0].getblockhash(n) for n in range( - first_block, - first_block + 4)] - coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] + b = [self.nodes[0].getblockhash(n) for n in range(first_block, first_block + 4)] + coinbase_txids = [self.nodes[0].getblock(h)["tx"][0] for h in b] utxo_1 = wallet.get_utxo(txid=coinbase_txids[1]) utxo_2 = wallet.get_utxo(txid=coinbase_txids[2]) utxo_3 = wallet.get_utxo(txid=coinbase_txids[3]) self.log.info( - "Create three transactions spending from coinbase utxos: spend_1, spend_2, spend_3") + "Create three transactions spending from coinbase utxos: spend_1, spend_2," + " spend_3" + ) spend_1 = wallet.create_self_transfer( - from_node=self.nodes[0], utxo_to_spend=utxo_1) + from_node=self.nodes[0], utxo_to_spend=utxo_1 + ) spend_2 = wallet.create_self_transfer( - from_node=self.nodes[0], utxo_to_spend=utxo_2) + from_node=self.nodes[0], utxo_to_spend=utxo_2 + ) spend_3 = wallet.create_self_transfer( - from_node=self.nodes[0], utxo_to_spend=utxo_3) + from_node=self.nodes[0], utxo_to_spend=utxo_3 + ) self.log.info( - "Create another transaction which is time-locked to two blocks in the future") + "Create another transaction which is time-locked to two blocks in the" + " future" + ) utxo = wallet.get_utxo(txid=coinbase_txids[0]) timelock_tx = wallet.create_self_transfer( from_node=self.nodes[0], utxo_to_spend=utxo, locktime=self.nodes[0].getblockcount() + 2, - )['hex'] + )["hex"] - self.log.info( - "Check that the time-locked transaction is too immature to spend") - assert_raises_rpc_error(-26, - "non-final", - self.nodes[0].sendrawtransaction, - timelock_tx) + self.log.info("Check that the time-locked transaction is too immature to spend") + assert_raises_rpc_error( + -26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx + ) self.log.info("Broadcast and mine spend_2 and spend_3") - wallet.sendrawtransaction( - from_node=self.nodes[0], - tx_hex=spend_2['hex']) - wallet.sendrawtransaction( - from_node=self.nodes[0], - tx_hex=spend_3['hex']) + wallet.sendrawtransaction(from_node=self.nodes[0], tx_hex=spend_2["hex"]) + wallet.sendrawtransaction(from_node=self.nodes[0], tx_hex=spend_3["hex"]) self.log.info("Generate a block") self.generate(self.nodes[0], 1) self.log.info( - "Check that time-locked transaction is still too immature to spend") - assert_raises_rpc_error(-26, - 'non-final', - self.nodes[0].sendrawtransaction, - timelock_tx) + "Check that time-locked transaction is still too immature to spend" + ) + assert_raises_rpc_error( + -26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx + ) self.log.info("Create spend_2_1 and spend_3_1") spend_2_1 = wallet.create_self_transfer( - from_node=self.nodes[0], - utxo_to_spend=spend_2["new_utxo"]) + from_node=self.nodes[0], utxo_to_spend=spend_2["new_utxo"] + ) spend_3_1 = wallet.create_self_transfer( - from_node=self.nodes[0], - utxo_to_spend=spend_3["new_utxo"]) + from_node=self.nodes[0], utxo_to_spend=spend_3["new_utxo"] + ) self.log.info("Broadcast and mine spend_3_1") - spend_3_1_id = self.nodes[0].sendrawtransaction(spend_3_1['hex']) + spend_3_1_id = self.nodes[0].sendrawtransaction(spend_3_1["hex"]) self.log.info("Generate a block") last_block = self.generate(self.nodes[0], 1) # generate() implicitly syncs blocks, so that peer 1 gets the block # before timelock_tx. 
# Otherwise, peer 1 would put the timelock_tx in m_recent_rejects self.log.info("The time-locked transaction can now be spent") timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx) self.log.info("Add spend_1 and spend_2_1 to the mempool") - spend_1_id = self.nodes[0].sendrawtransaction(spend_1['hex']) - spend_2_1_id = self.nodes[0].sendrawtransaction(spend_2_1['hex']) + spend_1_id = self.nodes[0].sendrawtransaction(spend_1["hex"]) + spend_2_1_id = self.nodes[0].sendrawtransaction(spend_2_1["hex"]) - assert_equal(set(self.nodes[0].getrawmempool()), { - spend_1_id, spend_2_1_id, timelock_tx_id}) + assert_equal( + set(self.nodes[0].getrawmempool()), + {spend_1_id, spend_2_1_id, timelock_tx_id}, + ) self.sync_all() # save acceptance heights of 2 of the txs to later test that they are # preserved across reorgs spend_1_height = self.nodes[0].getmempoolentry(spend_1_id)["height"] - spend_2_1_height = self.nodes[0].getmempoolentry(spend_2_1_id)[ - "height"] + spend_2_1_height = self.nodes[0].getmempoolentry(spend_2_1_id)["height"] self.log.info("invalidate the last block") for node in self.nodes: node.invalidateblock(last_block[0]) self.log.info( - "The time-locked transaction is now too immature and has been removed from the mempool") + "The time-locked transaction is now too immature and has been removed from" + " the mempool" + ) self.log.info( - "spend_3_1 has been re-orged out of the chain and is back in the mempool") - assert_equal(set(self.nodes[0].getrawmempool()), { - spend_1_id, spend_2_1_id, spend_3_1_id}) + "spend_3_1 has been re-orged out of the chain and is back in the mempool" + ) + assert_equal( + set(self.nodes[0].getrawmempool()), {spend_1_id, spend_2_1_id, spend_3_1_id} + ) # now ensure that the acceptance height of the two txs was preserved # across reorgs (and is not the same as the current tip height) tip_height = self.nodes[0].getblockchaininfo()["blocks"] assert spend_1_height != tip_height assert spend_2_1_height != tip_height assert_equal( - spend_1_height, self.nodes[0].getmempoolentry(spend_1_id)["height"]) + spend_1_height, self.nodes[0].getmempoolentry(spend_1_id)["height"] + ) assert_equal( - spend_2_1_height, self.nodes[0].getmempoolentry(spend_2_1_id)["height"]) + spend_2_1_height, self.nodes[0].getmempoolentry(spend_2_1_id)["height"] + ) # The new resurrected tx should just have height equal to current tip # height - assert_equal( - tip_height, self.nodes[0].getmempoolentry(spend_3_1_id)["height"]) + assert_equal(tip_height, self.nodes[0].getmempoolentry(spend_3_1_id)["height"]) self.log.info( - "Use invalidateblock to re-org back and make all those coinbase spends immature/invalid") + "Use invalidateblock to re-org back and make all those coinbase spends" + " immature/invalid" + ) b = self.nodes[0].getblockhash(first_block + 100) for node in self.nodes: node.invalidateblock(b) self.log.info("Check that the mempool is empty") assert_equal(set(self.nodes[0].getrawmempool()), set()) -if __name__ == '__main__': +if __name__ == "__main__": MempoolCoinbaseTest().main() diff --git a/test/functional/mempool_resurrect.py b/test/functional/mempool_resurrect.py index d529e9e73..8debd5cae 100755 --- a/test/functional/mempool_resurrect.py +++ b/test/functional/mempool_resurrect.py @@ -1,72 +1,70 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test resurrection of mined transactions when the blockchain is re-organized.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal from test_framework.wallet import MiniWallet class MempoolCoinbaseTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def run_test(self): node = self.nodes[0] wallet = MiniWallet(node) # Add enough mature utxos to the wallet so that all txs spend confirmed # coins self.generate(wallet, 3) self.generate(node, 100) # Spend block 1/2/3's coinbase transactions # Mine a block # Create three more transactions, spending the spends # Mine another block # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again blocks = [] spends1_ids = [ - wallet.send_self_transfer( - from_node=node)['txid'] for _ in range(3)] + wallet.send_self_transfer(from_node=node)["txid"] for _ in range(3) + ] blocks.extend(self.generate(node, 1)) spends2_ids = [ - wallet.send_self_transfer( - from_node=node)['txid'] for _ in range(3)] + wallet.send_self_transfer(from_node=node)["txid"] for _ in range(3) + ] blocks.extend(self.generate(node, 1)) spends_ids = set(spends1_ids + spends2_ids) # mempool should be empty, all txns confirmed assert_equal(set(node.getrawmempool()), set()) confirmed_txns = set( - node.getblock( - blocks[0])['tx'] + - node.getblock( - blocks[1])['tx']) + node.getblock(blocks[0])["tx"] + node.getblock(blocks[1])["tx"] + ) # Checks that all spend txns are contained in the mined blocks assert spends_ids < confirmed_txns # Use invalidateblock to re-org back node.invalidateblock(blocks[0]) # All txns should be back in mempool with 0 confirmations assert_equal(set(node.getrawmempool()), spends_ids) # Generate another block, they should all get mined blocks = self.generate(node, 1) # mempool should be empty, all txns confirmed assert_equal(set(node.getrawmempool()), set()) - confirmed_txns = set(node.getblock(blocks[0])['tx']) + confirmed_txns = set(node.getblock(blocks[0])["tx"]) assert spends_ids < confirmed_txns -if __name__ == '__main__': +if __name__ == "__main__": MempoolCoinbaseTest().main() diff --git a/test/functional/mempool_spend_coinbase.py b/test/functional/mempool_spend_coinbase.py index 1d08ee97c..98a27efd5 100755 --- a/test/functional/mempool_spend_coinbase.py +++ b/test/functional/mempool_spend_coinbase.py @@ -1,74 +1,70 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test spending coinbase transactions. The coinbase transaction in block N can appear in block N+100... so is valid in the mempool when the best block height is N+99. This test makes sure coinbase spends that will be mature in the next block are accepted into the memory pool, but less mature coinbase spends are NOT. 
""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error from test_framework.wallet import MiniWallet class MempoolSpendCoinbaseTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def run_test(self): wallet = MiniWallet(self.nodes[0]) # Invalidate two blocks, so that miniwallet has access to a coin that # will mature in the next block chain_height = 198 - self.nodes[0].invalidateblock( - self.nodes[0].getblockhash( - chain_height + 1)) + self.nodes[0].invalidateblock(self.nodes[0].getblockhash(chain_height + 1)) assert_equal(chain_height, self.nodes[0].getblockcount()) wallet.rescan_utxos() # Coinbase at height chain_height-100+1 ok in mempool, should # get mined. Coinbase at height chain_height-100+2 is # too immature to spend. def coinbase_txid(h): - return self.nodes[0].getblock( - self.nodes[0].getblockhash(h))['tx'][0] + return self.nodes[0].getblock(self.nodes[0].getblockhash(h))["tx"][0] - utxo_mature = wallet.get_utxo( - txid=coinbase_txid( - chain_height - 100 + 1)) - utxo_immature = wallet.get_utxo( - txid=coinbase_txid( - chain_height - 100 + 2)) + utxo_mature = wallet.get_utxo(txid=coinbase_txid(chain_height - 100 + 1)) + utxo_immature = wallet.get_utxo(txid=coinbase_txid(chain_height - 100 + 2)) spend_mature_id = wallet.send_self_transfer( - from_node=self.nodes[0], utxo_to_spend=utxo_mature)["txid"] + from_node=self.nodes[0], utxo_to_spend=utxo_mature + )["txid"] # other coinbase should be too immature to spend immature_tx = wallet.create_self_transfer( - from_node=self.nodes[0], - utxo_to_spend=utxo_immature) - assert_raises_rpc_error(-26, - "bad-txns-premature-spend-of-coinbase", - lambda: self.nodes[0].sendrawtransaction(immature_tx['hex'])) + from_node=self.nodes[0], utxo_to_spend=utxo_immature + ) + assert_raises_rpc_error( + -26, + "bad-txns-premature-spend-of-coinbase", + lambda: self.nodes[0].sendrawtransaction(immature_tx["hex"]), + ) # mempool should have just the mature one assert_equal(self.nodes[0].getrawmempool(), [spend_mature_id]) # mine a block, mature one should get confirmed self.generate(self.nodes[0], 1) assert_equal(set(self.nodes[0].getrawmempool()), set()) # ... and now previously immature can be spent: - spend_new_id = self.nodes[0].sendrawtransaction(immature_tx['hex']) + spend_new_id = self.nodes[0].sendrawtransaction(immature_tx["hex"]) assert_equal(self.nodes[0].getrawmempool(), [spend_new_id]) -if __name__ == '__main__': +if __name__ == "__main__": MempoolSpendCoinbaseTest().main() diff --git a/test/functional/mempool_unbroadcast.py b/test/functional/mempool_unbroadcast.py index e1cf72a8b..36f71b2b3 100755 --- a/test/functional/mempool_unbroadcast.py +++ b/test/functional/mempool_unbroadcast.py @@ -1,129 +1,135 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test that the mempool ensures transaction delivery by periodically sending to peers until a GETDATA is received.""" import time from test_framework.blocktools import create_confirmed_utxos from test_framework.p2p import P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal # 15 minutes in seconds MAX_INITIAL_BROADCAST_DELAY = 15 * 60 class MempoolUnbroadcastTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.test_broadcast() self.test_txn_removal() def test_broadcast(self): self.log.info( - "Test that mempool reattempts delivery of locally submitted transaction") + "Test that mempool reattempts delivery of locally submitted transaction" + ) node = self.nodes[0] min_relay_fee = node.getnetworkinfo()["relayfee"] create_confirmed_utxos(self, node, 10) self.disconnect_nodes(node.index, 1) self.log.info("Generate transactions that only node 0 knows about") # generate a wallet txn addr = node.getnewaddress() wallet_tx_hsh = node.sendtoaddress(addr, 100) utxos = node.listunspent() # generate a txn using sendrawtransaction us0 = utxos.pop() inputs = [{"txid": us0["txid"], "vout": us0["vout"]}] outputs = {addr: 100} tx = node.createrawtransaction(inputs, outputs) node.settxfee(min_relay_fee) txF = node.fundrawtransaction(tx) txFS = node.signrawtransactionwithwallet(txF["hex"]) rpc_tx_hsh = node.sendrawtransaction(txFS["hex"]) # check transactions are in unbroadcast using rpc mempoolinfo = self.nodes[0].getmempoolinfo() - assert_equal(mempoolinfo['unbroadcastcount'], 2) + assert_equal(mempoolinfo["unbroadcastcount"], 2) mempool = self.nodes[0].getrawmempool(True) for tx in mempool: - assert_equal(mempool[tx]['unbroadcast'], True) + assert_equal(mempool[tx]["unbroadcast"], True) # check that second node doesn't have these two txns mempool = self.nodes[1].getrawmempool() assert rpc_tx_hsh not in mempool assert wallet_tx_hsh not in mempool # ensure that unbroadcast txs are persisted to mempool.dat self.restart_node(0) self.log.info("Reconnect nodes & check if they are sent to node 1") self.connect_nodes(node.index, 1) # fast forward into the future & ensure that the second node has the # txns node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY) self.sync_mempools(timeout=30) mempool = self.nodes[1].getrawmempool() assert rpc_tx_hsh in mempool assert wallet_tx_hsh in mempool # check that transactions are no longer in first node's unbroadcast set mempool = self.nodes[0].getrawmempool(True) for tx in mempool: - assert_equal(mempool[tx]['unbroadcast'], False) + assert_equal(mempool[tx]["unbroadcast"], False) self.log.info( - "Add another connection & ensure transactions aren't broadcast again") + "Add another connection & ensure transactions aren't broadcast again" + ) conn = node.add_p2p_connection(P2PTxInvStore()) node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY) # allow sufficient time for possibility of broadcast time.sleep(2) assert_equal(len(conn.get_invs()), 0) self.disconnect_nodes(node.index, 1) node.disconnect_p2ps() self.log.info( - "Rebroadcast transaction and ensure it is not added to unbroadcast set when already in mempool") + "Rebroadcast transaction and ensure it is not added to unbroadcast set when" + " already in mempool" + ) rpc_tx_hsh = node.sendrawtransaction(txFS["hex"]) mempool = node.getrawmempool(True) assert rpc_tx_hsh in mempool - assert not mempool[rpc_tx_hsh]['unbroadcast'] + assert not 
mempool[rpc_tx_hsh]["unbroadcast"] def test_txn_removal(self): self.log.info( - "Test that transactions removed from mempool are removed from unbroadcast set") + "Test that transactions removed from mempool are removed from" + " unbroadcast set" + ) node = self.nodes[0] # since the node doesn't have any connections, it will not receive # any GETDATAs & thus the transaction will remain in the unbroadcast # set. addr = node.getnewaddress() txhsh = node.sendtoaddress(addr, 100) # check transaction was removed from unbroadcast set due to presence in # a block removal_reason = ( f"Removed {txhsh} from set of unbroadcast txns before confirmation that " "txn was sent out" ) with node.assert_debug_log([removal_reason]): self.generate(node, 1, sync_fun=self.no_op) if __name__ == "__main__": MempoolUnbroadcastTest().main() diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py index 3fccb990e..99e239baa 100755 --- a/test/functional/mempool_updatefromblock.py +++ b/test/functional/mempool_updatefromblock.py @@ -1,172 +1,173 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool descendants/ancestors information update. Test mempool update of transaction descendants/ancestors information (count, size) when transactions have been re-added from a disconnected block to the mempool. """ import time from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class MempoolUpdateFromBlockTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.limit_ancestor_descendant_count = 60 self.extra_args = [ [ - '-limitdescendantsize=5000', - '-limitancestorsize=5000', - f'-limitancestorcount={self.limit_ancestor_descendant_count}', - f'-limitdescendantcount={self.limit_ancestor_descendant_count}', - '-deprecatedrpc=mempool_ancestors_descendants', + "-limitdescendantsize=5000", + "-limitancestorsize=5000", + f"-limitancestorcount={self.limit_ancestor_descendant_count}", + f"-limitdescendantcount={self.limit_ancestor_descendant_count}", + "-deprecatedrpc=mempool_ancestors_descendants", ], ] def skip_test_if_missing_module(self): self.skip_if_no_wallet() - def transaction_graph_test(self, size, n_tx_to_mine=None, - start_input_txid='', end_address='', - fee=Decimal(1000)): + def transaction_graph_test( + self, + size, + n_tx_to_mine=None, + start_input_txid="", + end_address="", + fee=Decimal(1000), + ): """Create an acyclic tournament (a type of directed graph) of transactions and use it for testing. 
Keyword arguments: size -- the order N of the tournament which is equal to the number of the created transactions n_tx_to_mine -- the number of transactions that should be mined into a block If all of the N created transactions tx[0]..tx[N-1] reside in the mempool, the following holds: the tx[K] transaction: - has N-K descendants (including this one), and - has K+1 ancestors (including this one) More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory) """ if not start_input_txid: - start_input_txid = self.nodes[0].getblock( - self.nodes[0].getblockhash(1))['tx'][0] + start_input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(1))[ + "tx" + ][0] if not end_address: end_address = self.nodes[0].getnewaddress() - first_block_hash = '' + first_block_hash = "" tx_id = [] tx_size = [] - self.log.info(f'Creating {size} transactions...') + self.log.info(f"Creating {size} transactions...") for i in range(0, size): - self.log.debug(f'Preparing transaction #{i}...') + self.log.debug(f"Preparing transaction #{i}...") # Prepare inputs. if i == 0: - inputs = [{'txid': start_input_txid, 'vout': 0}] - inputs_value = self.nodes[0].gettxout( - start_input_txid, 0)['value'] + inputs = [{"txid": start_input_txid, "vout": 0}] + inputs_value = self.nodes[0].gettxout(start_input_txid, 0)["value"] else: inputs = [] inputs_value = 0 for j, tx in enumerate(tx_id[0:i]): # Transaction tx[K] is a child of each of the previous # transactions tx[0]..tx[K-1] at their output K-1. vout = i - j - 1 - inputs.append({'txid': tx_id[j], 'vout': vout}) - inputs_value += self.nodes[0].gettxout(tx, vout)['value'] + inputs.append({"txid": tx_id[j], "vout": vout}) + inputs_value += self.nodes[0].gettxout(tx, vout)["value"] - self.log.debug(f'inputs={inputs}') - self.log.debug(f'inputs_value={inputs_value}') + self.log.debug(f"inputs={inputs}") + self.log.debug(f"inputs_value={inputs_value}") # Prepare outputs. tx_count = i + 1 if tx_count < size: # Transaction tx[K] is an ancestor of each of the subsequent # transactions tx[K+1]..tx[N-1]. n_outputs = size - tx_count - output_value = ( - (inputs_value - - fee) / - Decimal(n_outputs)).quantize( - Decimal('0.01')) + output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize( + Decimal("0.01") + ) outputs = {} for _ in range(n_outputs): outputs[self.nodes[0].getnewaddress()] = output_value else: - output_value = ( - inputs_value - - fee).quantize( - Decimal('0.01')) + output_value = (inputs_value - fee).quantize(Decimal("0.01")) outputs = {end_address: output_value} - self.log.debug(f'output_value={output_value}') - self.log.debug(f'outputs={outputs}') + self.log.debug(f"output_value={output_value}") + self.log.debug(f"outputs={outputs}") # Create a new transaction. - unsigned_raw_tx = self.nodes[0].createrawtransaction( - inputs, outputs) - signed_raw_tx = self.nodes[0].signrawtransactionwithwallet( - unsigned_raw_tx) - tx_id.append( - self.nodes[0].sendrawtransaction( - signed_raw_tx['hex'])) - tx_size.append(self.nodes[0].getmempoolentry(tx_id[-1])['size']) + unsigned_raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) + signed_raw_tx = self.nodes[0].signrawtransactionwithwallet(unsigned_raw_tx) + tx_id.append(self.nodes[0].sendrawtransaction(signed_raw_tx["hex"])) + tx_size.append(self.nodes[0].getmempoolentry(tx_id[-1])["size"]) if tx_count in n_tx_to_mine: # The created transactions are mined into blocks in batches.
self.log.info( - f'The batch of {len(self.nodes[0].getrawmempool())} transactions ' - 'has been accepted into the mempool.') + f"The batch of {len(self.nodes[0].getrawmempool())} transactions " + "has been accepted into the mempool." + ) block_hash = self.generate(self.nodes[0], 1)[0] if not first_block_hash: first_block_hash = block_hash assert_equal(len(self.nodes[0].getrawmempool()), 0) self.log.info( - 'All of the transactions from the current batch have been' - ' mined into a block.') + "All of the transactions from the current batch have been" + " mined into a block." + ) elif tx_count == size: # At the end all of the mined blocks are invalidated, and all of the created # transactions should be re-added from disconnected blocks to # the mempool. self.log.info( - f'The last batch of {len(self.nodes[0].getrawmempool())} ' - 'transactions has been accepted into the mempool.') + f"The last batch of {len(self.nodes[0].getrawmempool())} " + "transactions has been accepted into the mempool." + ) start = time.time() self.nodes[0].invalidateblock(first_block_hash) end = time.time() assert_equal(len(self.nodes[0].getrawmempool()), size) self.log.info( - f'All of the recently mined transactions have been re-added into ' - f'the mempool in {end - start} seconds.') + "All of the recently mined transactions have been re-added into " + f"the mempool in {end - start} seconds." + ) self.log.info( - 'Checking descendants/ancestors properties of all of the' - ' in-mempool transactions...') + "Checking descendants/ancestors properties of all of the" + " in-mempool transactions..." + ) for k, tx in enumerate(tx_id): - self.log.debug(f'Check transaction #{k}.') + self.log.debug(f"Check transaction #{k}.") entry = self.nodes[0].getmempoolentry(tx) - assert_equal(entry['descendantcount'], size - k) - assert_equal(entry['descendantsize'], sum(tx_size[k:size])) - assert_equal(entry['ancestorcount'], k + 1) - assert_equal(entry['ancestorsize'], sum(tx_size[0:(k + 1)])) + assert_equal(entry["descendantcount"], size - k) + assert_equal(entry["descendantsize"], sum(tx_size[k:size])) + assert_equal(entry["ancestorcount"], k + 1) + assert_equal(entry["ancestorsize"], sum(tx_size[0 : (k + 1)])) def run_test(self): # Mine the transactions in batches so we get reorg_depth blocks # reorg'ed reorg_depth = 4 self.transaction_graph_test( size=self.limit_ancestor_descendant_count, n_tx_to_mine=range( 0, self.limit_ancestor_descendant_count, self.limit_ancestor_descendant_count // reorg_depth, ), ) -if __name__ == '__main__': +if __name__ == "__main__": MempoolUpdateFromBlockTest().main() diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py index 8e67e2378..227b74c43 100755 --- a/test/functional/mining_basic.py +++ b/test/functional/mining_basic.py @@ -1,285 +1,328 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
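# Aside on the shape built by transaction_graph_test() above: tx[i] spends
# one output of every earlier tx[j], which is exactly an acyclic tournament,
# so the descendant/ancestor assertions at the end of the test follow by
# construction. A self-contained model of those counts (N is illustrative):
N = 5
ancestors = {k: set(range(k + 1)) for k in range(N)}     # tx[0] .. tx[k]
descendants = {k: set(range(k, N)) for k in range(N)}    # tx[k] .. tx[N-1]
for k in range(N):
    assert len(ancestors[k]) == k + 1      # K+1 ancestors, itself included
    assert len(descendants[k]) == N - k    # N-K descendants, itself included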
"""Test mining RPCs - getmininginfo - getblocktemplate proposal mode - submitblock""" import copy from decimal import Decimal from test_framework.blocktools import TIME_GENESIS_BLOCK, create_coinbase from test_framework.messages import BLOCK_HEADER_SIZE, CBlock, CBlockHeader from test_framework.p2p import P2PDataStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error VERSIONBITS_TOP_BITS = 0x20000000 def assert_template(node, block, expect, rehash=True): if rehash: block.hashMerkleRoot = block.calc_merkle_root() rsp = node.getblocktemplate( - template_request={ - 'data': block.serialize().hex(), - 'mode': 'proposal'}) + template_request={"data": block.serialize().hex(), "mode": "proposal"} + ) assert_equal(rsp, expect) class MiningTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True self.supports_cli = False def mine_chain(self): - self.log.info('Create some old blocks') + self.log.info("Create some old blocks") node = self.nodes[0] address = node.get_deterministic_priv_key().address - for t in range(TIME_GENESIS_BLOCK, - TIME_GENESIS_BLOCK + 200 * 600, 600): + for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600): node.setmocktime(t) self.generatetoaddress(node, 1, address, sync_fun=self.no_op) mining_info = node.getmininginfo() - assert_equal(mining_info['blocks'], 200) - assert_equal(mining_info['currentblocktx'], 0) - assert_equal(mining_info['currentblocksize'], 1000) + assert_equal(mining_info["blocks"], 200) + assert_equal(mining_info["currentblocktx"], 0) + assert_equal(mining_info["currentblocksize"], 1000) - self.log.info('test blockversion') - self.restart_node( - 0, extra_args=[f'-mocktime={t}', '-blockversion=1337']) + self.log.info("test blockversion") + self.restart_node(0, extra_args=[f"-mocktime={t}", "-blockversion=1337"]) self.connect_nodes(0, 1) - assert_equal(1337, self.nodes[0].getblocktemplate()['version']) - self.restart_node(0, extra_args=[f'-mocktime={t}']) + assert_equal(1337, self.nodes[0].getblocktemplate()["version"]) + self.restart_node(0, extra_args=[f"-mocktime={t}"]) self.connect_nodes(0, 1) - assert_equal( - VERSIONBITS_TOP_BITS, - self.nodes[0].getblocktemplate()['version']) + assert_equal(VERSIONBITS_TOP_BITS, self.nodes[0].getblocktemplate()["version"]) self.restart_node(0) self.connect_nodes(0, 1) def run_test(self): self.mine_chain() node = self.nodes[0] def assert_submitblock(block, result_str_1, result_str_2=None): block.solve() - result_str_2 = result_str_2 or 'duplicate-invalid' - assert_equal(result_str_1, node.submitblock( - hexdata=block.serialize().hex())) - assert_equal(result_str_2, node.submitblock( - hexdata=block.serialize().hex())) - - self.log.info('getmininginfo') + result_str_2 = result_str_2 or "duplicate-invalid" + assert_equal( + result_str_1, node.submitblock(hexdata=block.serialize().hex()) + ) + assert_equal( + result_str_2, node.submitblock(hexdata=block.serialize().hex()) + ) + + self.log.info("getmininginfo") mining_info = node.getmininginfo() - assert_equal(mining_info['blocks'], 200) - assert_equal(mining_info['chain'], self.chain) - assert 'currentblocktx' not in mining_info - assert 'currentblocksize' not in mining_info - assert_equal(mining_info['difficulty'], - Decimal('4.656542373906925E-10')) - assert_equal(mining_info['networkhashps'], - Decimal('0.003333333333333334')) - assert_equal(mining_info['pooledtx'], 0) + assert_equal(mining_info["blocks"], 200) + 
assert_equal(mining_info["chain"], self.chain) + assert "currentblocktx" not in mining_info + assert "currentblocksize" not in mining_info + assert_equal(mining_info["difficulty"], Decimal("4.656542373906925E-10")) + assert_equal(mining_info["networkhashps"], Decimal("0.003333333333333334")) + assert_equal(mining_info["pooledtx"], 0) # Mine a block to leave initial block download - self.generatetoaddress( - node, 1, node.get_deterministic_priv_key().address) + self.generatetoaddress(node, 1, node.get_deterministic_priv_key().address) tmpl = node.getblocktemplate() self.log.info("getblocktemplate: Test capability advertised") - assert 'proposal' in tmpl['capabilities'] + assert "proposal" in tmpl["capabilities"] next_height = int(tmpl["height"]) coinbase_tx = create_coinbase(height=next_height) # sequence numbers must not be max for nLockTime to have effect - coinbase_tx.vin[0].nSequence = 2 ** 32 - 2 + coinbase_tx.vin[0].nSequence = 2**32 - 2 coinbase_tx.rehash() block = CBlock() block.nVersion = tmpl["version"] block.hashPrevBlock = int(tmpl["previousblockhash"], 16) block.nTime = tmpl["curtime"] block.nBits = int(tmpl["bits"], 16) block.nNonce = 0 block.vtx = [coinbase_tx] self.log.info("getblocktemplate: Test valid block") assert_template(node, block, None) self.log.info("submitblock: Test block decode failure") - assert_raises_rpc_error(-22, "Block decode failed", - node.submitblock, block.serialize()[:-15].hex()) + assert_raises_rpc_error( + -22, "Block decode failed", node.submitblock, block.serialize()[:-15].hex() + ) - self.log.info( - "getblocktemplate: Test bad input hash for coinbase transaction") + self.log.info("getblocktemplate: Test bad input hash for coinbase transaction") bad_block = copy.deepcopy(block) bad_block.vtx[0].vin[0].prevout.txid += 1 bad_block.vtx[0].rehash() - assert_template(node, bad_block, 'bad-cb-missing') + assert_template(node, bad_block, "bad-cb-missing") self.log.info("submitblock: Test invalid coinbase transaction") - assert_raises_rpc_error(-22, "Block does not start with a coinbase", - node.submitblock, bad_block.serialize().hex()) + assert_raises_rpc_error( + -22, + "Block does not start with a coinbase", + node.submitblock, + bad_block.serialize().hex(), + ) self.log.info("getblocktemplate: Test truncated final transaction") - assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, { - 'data': block.serialize()[:-1].hex(), 'mode': 'proposal'}) + assert_raises_rpc_error( + -22, + "Block decode failed", + node.getblocktemplate, + {"data": block.serialize()[:-1].hex(), "mode": "proposal"}, + ) self.log.info("getblocktemplate: Test duplicate transaction") bad_block = copy.deepcopy(block) bad_block.vtx.append(bad_block.vtx[0]) - assert_template(node, bad_block, 'bad-txns-duplicate') - assert_submitblock(bad_block, 'bad-txns-duplicate', - 'bad-txns-duplicate') + assert_template(node, bad_block, "bad-txns-duplicate") + assert_submitblock(bad_block, "bad-txns-duplicate", "bad-txns-duplicate") self.log.info("getblocktemplate: Test invalid transaction") bad_block = copy.deepcopy(block) bad_tx = copy.deepcopy(bad_block.vtx[0]) bad_tx.vin[0].prevout.txid = 255 bad_tx.rehash() bad_block.vtx.append(bad_tx) - assert_template(node, bad_block, 'bad-txns-inputs-missingorspent') - assert_submitblock(bad_block, 'bad-txns-inputs-missingorspent') + assert_template(node, bad_block, "bad-txns-inputs-missingorspent") + assert_submitblock(bad_block, "bad-txns-inputs-missingorspent") self.log.info("getblocktemplate: Test nonfinal transaction") bad_block = 
copy.deepcopy(block) - bad_block.vtx[0].nLockTime = 2 ** 32 - 1 + bad_block.vtx[0].nLockTime = 2**32 - 1 bad_block.vtx[0].rehash() - assert_template(node, bad_block, 'bad-txns-nonfinal') - assert_submitblock(bad_block, 'bad-txns-nonfinal') + assert_template(node, bad_block, "bad-txns-nonfinal") + assert_submitblock(bad_block, "bad-txns-nonfinal") self.log.info("getblocktemplate: Test bad tx count") # The tx count is immediately after the block header bad_block_sn = bytearray(block.serialize()) assert_equal(bad_block_sn[BLOCK_HEADER_SIZE], 1) bad_block_sn[BLOCK_HEADER_SIZE] += 1 - assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, { - 'data': bad_block_sn.hex(), 'mode': 'proposal'}) + assert_raises_rpc_error( + -22, + "Block decode failed", + node.getblocktemplate, + {"data": bad_block_sn.hex(), "mode": "proposal"}, + ) self.log.info("getblocktemplate: Test bad bits") bad_block = copy.deepcopy(block) bad_block.nBits = 469762303 # impossible in the real world - assert_template(node, bad_block, 'bad-diffbits') + assert_template(node, bad_block, "bad-diffbits") self.log.info("getblocktemplate: Test bad merkle root") bad_block = copy.deepcopy(block) bad_block.hashMerkleRoot += 1 - assert_template(node, bad_block, 'bad-txnmrklroot', False) - assert_submitblock(bad_block, 'bad-txnmrklroot', 'bad-txnmrklroot') + assert_template(node, bad_block, "bad-txnmrklroot", False) + assert_submitblock(bad_block, "bad-txnmrklroot", "bad-txnmrklroot") self.log.info("getblocktemplate: Test bad timestamps") bad_block = copy.deepcopy(block) - bad_block.nTime = 2 ** 31 - 1 - assert_template(node, bad_block, 'time-too-new') - assert_submitblock(bad_block, 'time-too-new', 'time-too-new') + bad_block.nTime = 2**31 - 1 + assert_template(node, bad_block, "time-too-new") + assert_submitblock(bad_block, "time-too-new", "time-too-new") bad_block.nTime = 0 - assert_template(node, bad_block, 'time-too-old') - assert_submitblock(bad_block, 'time-too-old', 'time-too-old') + assert_template(node, bad_block, "time-too-old") + assert_submitblock(bad_block, "time-too-old", "time-too-old") self.log.info("getblocktemplate: Test not best block") bad_block = copy.deepcopy(block) bad_block.hashPrevBlock = 123 - assert_template(node, bad_block, 'inconclusive-not-best-prevblk') - assert_submitblock(bad_block, 'prev-blk-not-found', - 'prev-blk-not-found') - - self.log.info('submitheader tests') - assert_raises_rpc_error(-22, 'Block header decode failed', - lambda: node.submitheader(hexdata='xx' * BLOCK_HEADER_SIZE)) - assert_raises_rpc_error(-22, 'Block header decode failed', - lambda: node.submitheader(hexdata='ff' * (BLOCK_HEADER_SIZE - 2))) - assert_raises_rpc_error(-25, 'Must submit previous header', - lambda: node.submitheader(hexdata=super(CBlock, bad_block).serialize().hex())) + assert_template(node, bad_block, "inconclusive-not-best-prevblk") + assert_submitblock(bad_block, "prev-blk-not-found", "prev-blk-not-found") + + self.log.info("submitheader tests") + assert_raises_rpc_error( + -22, + "Block header decode failed", + lambda: node.submitheader(hexdata="xx" * BLOCK_HEADER_SIZE), + ) + assert_raises_rpc_error( + -22, + "Block header decode failed", + lambda: node.submitheader(hexdata="ff" * (BLOCK_HEADER_SIZE - 2)), + ) + assert_raises_rpc_error( + -25, + "Must submit previous header", + lambda: node.submitheader( + hexdata=super(CBlock, bad_block).serialize().hex() + ), + ) block.nTime += 1 block.solve() - def chain_tip(b_hash, *, status='headers-only', branchlen=1): - return {'hash': b_hash, 'height': 
202, - 'branchlen': branchlen, 'status': status} + def chain_tip(b_hash, *, status="headers-only", branchlen=1): + return { + "hash": b_hash, + "height": 202, + "branchlen": branchlen, + "status": status, + } assert chain_tip(block.hash) not in node.getchaintips() node.submitheader(hexdata=block.serialize().hex()) assert chain_tip(block.hash) in node.getchaintips() # Noop node.submitheader(hexdata=CBlockHeader(block).serialize().hex()) assert chain_tip(block.hash) in node.getchaintips() bad_block_root = copy.deepcopy(block) bad_block_root.hashMerkleRoot += 2 bad_block_root.solve() assert chain_tip(bad_block_root.hash) not in node.getchaintips() - node.submitheader(hexdata=CBlockHeader( - bad_block_root).serialize().hex()) + node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex()) assert chain_tip(bad_block_root.hash) in node.getchaintips() # Should still reject invalid blocks, even if we have the header: - assert_equal(node.submitblock( - hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot') - assert_equal(node.submitblock( - hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot') + assert_equal( + node.submitblock(hexdata=bad_block_root.serialize().hex()), + "bad-txnmrklroot", + ) + assert_equal( + node.submitblock(hexdata=bad_block_root.serialize().hex()), + "bad-txnmrklroot", + ) assert chain_tip(bad_block_root.hash) in node.getchaintips() # We know the header for this invalid block, so should just return # early without error: - node.submitheader(hexdata=CBlockHeader( - bad_block_root).serialize().hex()) + node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex()) assert chain_tip(bad_block_root.hash) in node.getchaintips() bad_block_lock = copy.deepcopy(block) bad_block_lock.vtx[0].nLockTime = 2**32 - 1 bad_block_lock.vtx[0].rehash() bad_block_lock.hashMerkleRoot = bad_block_lock.calc_merkle_root() bad_block_lock.solve() - assert_equal(node.submitblock( - hexdata=bad_block_lock.serialize().hex()), 'bad-txns-nonfinal') - assert_equal(node.submitblock( - hexdata=bad_block_lock.serialize().hex()), 'duplicate-invalid') + assert_equal( + node.submitblock(hexdata=bad_block_lock.serialize().hex()), + "bad-txns-nonfinal", + ) + assert_equal( + node.submitblock(hexdata=bad_block_lock.serialize().hex()), + "duplicate-invalid", + ) # Build a "good" block on top of the submitted bad block bad_block2 = copy.deepcopy(block) bad_block2.hashPrevBlock = bad_block_lock.sha256 bad_block2.solve() - assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader( - hexdata=CBlockHeader(bad_block2).serialize().hex())) + assert_raises_rpc_error( + -25, + "bad-prevblk", + lambda: node.submitheader( + hexdata=CBlockHeader(bad_block2).serialize().hex() + ), + ) # Should reject invalid header right away bad_block_time = copy.deepcopy(block) bad_block_time.nTime = 1 bad_block_time.solve() - assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader( - hexdata=CBlockHeader(bad_block_time).serialize().hex())) + assert_raises_rpc_error( + -25, + "time-too-old", + lambda: node.submitheader( + hexdata=CBlockHeader(bad_block_time).serialize().hex() + ), + ) # Should ask for the block from a p2p node, if they announce the header # as well: peer = node.add_p2p_connection(P2PDataStore()) # Drop the first getheaders peer.wait_for_getheaders(timeout=5) peer.send_blocks_and_test(blocks=[block], node=node) # Must be active now: - assert chain_tip(block.hash, status='active', - branchlen=0) in node.getchaintips() + assert ( + chain_tip(block.hash, 
status="active", branchlen=0) in node.getchaintips() + ) # Building a few blocks should give the same results - self.generatetoaddress( - node, 10, node.get_deterministic_priv_key().address) - assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader( - hexdata=CBlockHeader(bad_block_time).serialize().hex())) - assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader( - hexdata=CBlockHeader(bad_block2).serialize().hex())) + self.generatetoaddress(node, 10, node.get_deterministic_priv_key().address) + assert_raises_rpc_error( + -25, + "time-too-old", + lambda: node.submitheader( + hexdata=CBlockHeader(bad_block_time).serialize().hex() + ), + ) + assert_raises_rpc_error( + -25, + "bad-prevblk", + lambda: node.submitheader( + hexdata=CBlockHeader(bad_block2).serialize().hex() + ), + ) node.submitheader(hexdata=CBlockHeader(block).serialize().hex()) - node.submitheader(hexdata=CBlockHeader( - bad_block_root).serialize().hex()) + node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex()) # valid - assert_equal(node.submitblock( - hexdata=block.serialize().hex()), 'duplicate') + assert_equal(node.submitblock(hexdata=block.serialize().hex()), "duplicate") # Sanity check that maxtries supports large integers - self.generatetoaddress(node, - 1, node.get_deterministic_priv_key().address, pow( - 2, 32)) + self.generatetoaddress( + node, 1, node.get_deterministic_priv_key().address, pow(2, 32) + ) -if __name__ == '__main__': +if __name__ == "__main__": MiningTest().main() diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py index 9af638d68..3522ea102 100755 --- a/test/functional/mining_getblocktemplate_longpoll.py +++ b/test/functional/mining_getblocktemplate_longpoll.py @@ -1,98 +1,107 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test longpolling with getblocktemplate.""" import random import threading from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import get_rpc_proxy from test_framework.wallet import MiniWallet class LongpollThread(threading.Thread): - def __init__(self, node): threading.Thread.__init__(self) # query current longpollid templat = node.getblocktemplate() - self.longpollid = templat['longpollid'] + self.longpollid = templat["longpollid"] # create a new connection to the node, we can't use the same # connection from two threads self.node = get_rpc_proxy( - node.url, 1, timeout=600, coveragedir=node.coverage_dir) + node.url, 1, timeout=600, coveragedir=node.coverage_dir + ) def run(self): - self.node.getblocktemplate({'longpollid': self.longpollid}) + self.node.getblocktemplate({"longpollid": self.longpollid}) class GetBlockTemplateLPTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.supports_cli = False def run_test(self): self.log.info( - "Warning: this test will take about 70 seconds in the best case. Be patient.") + "Warning: this test will take about 70 seconds in the best case. Be" + " patient." 
+ ) self.log.info( - "Test that longpollid doesn't change between successive getblocktemplate() invocations if nothing else happens") + "Test that longpollid doesn't change between successive getblocktemplate()" + " invocations if nothing else happens" + ) self.generate(self.nodes[0], 10) templat = self.nodes[0].getblocktemplate() - longpollid = templat['longpollid'] + longpollid = templat["longpollid"] # longpollid should not change between successive invocations if # nothing else happens templat2 = self.nodes[0].getblocktemplate() - assert templat2['longpollid'] == longpollid + assert templat2["longpollid"] == longpollid self.log.info("Test that longpoll waits if we do nothing") thr = LongpollThread(self.nodes[0]) thr.start() # check that thread still lives # wait 5 seconds or until thread exits thr.join(5) assert thr.is_alive() miniwallets = [MiniWallet(node) for node in self.nodes] self.log.info( - "Test that longpoll will terminate if another node generates a block") + "Test that longpoll will terminate if another node generates a block" + ) # generate a block on another node self.generate(miniwallets[1], 1) # check that thread will exit now that a new block has been found # wait 5 seconds or until thread exits thr.join(5) assert not thr.is_alive() self.log.info( - "Test that longpoll will terminate if we generate a block ourselves") + "Test that longpoll will terminate if we generate a block ourselves" + ) thr = LongpollThread(self.nodes[0]) thr.start() # generate a block on own node self.generate(miniwallets[0], 1) # wait 5 seconds or until thread exits thr.join(5) assert not thr.is_alive() # Add enough mature utxos to the wallets, so that all txs spend # confirmed coins self.generate(self.nodes[0], 100) self.log.info( - "Test that introducing a new transaction into the mempool will terminate the longpoll") + "Test that introducing a new transaction into the mempool will terminate" + " the longpoll" + ) thr = LongpollThread(self.nodes[0]) thr.start() # generate a random transaction and submit it min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"] - fee_rate = min_relay_fee + Decimal('0.10') * random.randint(0, 20) - miniwallets[0].send_self_transfer(from_node=random.choice(self.nodes), - fee_rate=fee_rate) + fee_rate = min_relay_fee + Decimal("0.10") * random.randint(0, 20) + miniwallets[0].send_self_transfer( + from_node=random.choice(self.nodes), fee_rate=fee_rate + ) # after one minute, every 10 seconds the mempool is probed, so in 80 # seconds it should have returned thr.join(60 + 20) assert not thr.is_alive() -if __name__ == '__main__': +if __name__ == "__main__": GetBlockTemplateLPTest().main() diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py index c07982736..191c2e2f2 100755 --- a/test/functional/mining_prioritisetransaction.py +++ b/test/functional/mining_prioritisetransaction.py @@ -1,220 +1,242 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
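# Aside on the longpoll protocol exercised by LongpollThread above: a caller
# hands the last longpollid back to getblocktemplate, and the RPC blocks
# server-side until that template goes stale (a new tip, or new mempool
# transactions after a delay), then returns a fresh template. A minimal
# consumer loop (a sketch; assumes `node` is an RPC proxy like the one the
# test builds with get_rpc_proxy):
def watch_templates(node):
    template = node.getblocktemplate()
    while True:
        yield template
        # blocks until the current template is no longer up to date
        template = node.getblocktemplate({"longpollid": template["longpollid"]})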
"""Test the prioritisetransaction mining RPC.""" import time from test_framework.blocktools import create_confirmed_utxos, send_big_transactions # FIXME: review how this test needs to be adapted w.r.t _LEGACY_MAX_BLOCK_SIZE from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.messages import COIN from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class PrioritiseTransactionTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 # TODO: remove -txindex. Currently required for getrawtransaction call # (called by calculate_fee_from_txid) - self.extra_args = [[ - "-printpriority=1", - "-acceptnonstdtxn=1", - "-txindex" - ]] * self.num_nodes + self.extra_args = [ + ["-printpriority=1", "-acceptnonstdtxn=1", "-txindex"] + ] * self.num_nodes self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Test `prioritisetransaction` required parameters - assert_raises_rpc_error(-1, - "prioritisetransaction", - self.nodes[0].prioritisetransaction) - assert_raises_rpc_error(-1, - "prioritisetransaction", - self.nodes[0].prioritisetransaction, - '') - assert_raises_rpc_error(-1, - "prioritisetransaction", - self.nodes[0].prioritisetransaction, - '', - 0) + assert_raises_rpc_error( + -1, "prioritisetransaction", self.nodes[0].prioritisetransaction + ) + assert_raises_rpc_error( + -1, "prioritisetransaction", self.nodes[0].prioritisetransaction, "" + ) + assert_raises_rpc_error( + -1, "prioritisetransaction", self.nodes[0].prioritisetransaction, "", 0 + ) # Test `prioritisetransaction` invalid extra parameters - assert_raises_rpc_error(-1, - "prioritisetransaction", - self.nodes[0].prioritisetransaction, - '', - 0, - 0, - 0) + assert_raises_rpc_error( + -1, + "prioritisetransaction", + self.nodes[0].prioritisetransaction, + "", + 0, + 0, + 0, + ) # Test `prioritisetransaction` invalid `txid` - assert_raises_rpc_error(-8, - "txid must be of length 64 (not 3, for 'foo')", - self.nodes[0].prioritisetransaction, - txid='foo', - fee_delta=0) assert_raises_rpc_error( -8, - "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", + "txid must be of length 64 (not 3, for 'foo')", + self.nodes[0].prioritisetransaction, + txid="foo", + fee_delta=0, + ) + assert_raises_rpc_error( + -8, + ( + "txid must be hexadecimal string (not" + " 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')" + ), self.nodes[0].prioritisetransaction, - txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', - fee_delta=0) + txid="Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", + fee_delta=0, + ) # Test `prioritisetransaction` invalid `dummy` - txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' - assert_raises_rpc_error(-1, - "JSON value is not a number as expected", - self.nodes[0].prioritisetransaction, - txid, - 'foo', - 0) + txid = "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000" + assert_raises_rpc_error( + -1, + "JSON value is not a number as expected", + self.nodes[0].prioritisetransaction, + txid, + "foo", + 0, + ) assert_raises_rpc_error( -8, - "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", + ( + "Priority is no longer supported, dummy argument to" + " prioritisetransaction must be 0." 
+ ), self.nodes[0].prioritisetransaction, txid, 1, - 0) + 0, + ) # Test `prioritisetransaction` invalid `fee_delta` - assert_raises_rpc_error(-1, - "JSON value is not an integer as expected", - self.nodes[0].prioritisetransaction, - txid=txid, - fee_delta='foo') + assert_raises_rpc_error( + -1, + "JSON value is not an integer as expected", + self.nodes[0].prioritisetransaction, + txid=txid, + fee_delta="foo", + ) - self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] + self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] utxo_count = 90 utxos = create_confirmed_utxos(self, self.nodes[0], utxo_count) txids = [] # Create 3 batches of transactions at 3 different fee rate levels range_size = utxo_count // 3 for i in range(3): txids.append([]) start_range = i * range_size end_range = start_range + range_size - txids[i] = send_big_transactions(self.nodes[0], utxos[start_range:end_range], - end_range - start_range, 10 * (i + 1)) + txids[i] = send_big_transactions( + self.nodes[0], + utxos[start_range:end_range], + end_range - start_range, + 10 * (i + 1), + ) # Make sure that the size of each group of transactions exceeds # LEGACY_MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create # more transactions. mempool = self.nodes[0].getrawmempool(True) sizes = [0, 0, 0] for i in range(3): for j in txids[i]: assert j in mempool - sizes[i] += mempool[j]['size'] + sizes[i] += mempool[j]["size"] # Fail => raise utxo_count assert sizes[i] > LEGACY_MAX_BLOCK_SIZE # add a fee delta to something in the cheapest bucket and make sure it gets mined # also check that a different entry in the cheapest bucket is NOT mined self.nodes[0].prioritisetransaction( - txid=txids[0][0], fee_delta=100 * self.nodes[0].calculate_fee_from_txid(txids[0][0])) + txid=txids[0][0], + fee_delta=100 * self.nodes[0].calculate_fee_from_txid(txids[0][0]), + ) self.generate(self.nodes[0], 1) mempool = self.nodes[0].getrawmempool() self.log.info("Assert that prioritised transaction was mined") assert txids[0][0] not in mempool assert txids[0][1] in mempool confirmed_transactions = self.nodes[0].getblock( - self.nodes[0].getbestblockhash())['tx'] + self.nodes[0].getbestblockhash() + )["tx"] # Pull the highest fee-rate transaction from a block high_fee_tx = confirmed_transactions[1] # Something high-fee should have been mined! assert high_fee_tx is not None # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). # # NOTE WELL: gettransaction returns the fee as a negative number and # as fractional coins. However, the prioritisetransaction RPC expects a # number of satoshi to add or subtract from the actual fee. # Thus the conversion here is simply int(tx_fee*COIN) to remove all fees, and then # we add the minimum fee back. - tx_fee = self.nodes[0].gettransaction(high_fee_tx)['fee'] + tx_fee = self.nodes[0].gettransaction(high_fee_tx)["fee"] self.nodes[0].prioritisetransaction( - txid=high_fee_tx, fee_delta=int(tx_fee * COIN) + self.nodes[0].calculate_fee_from_txid(high_fee_tx)) + txid=high_fee_tx, + fee_delta=int(tx_fee * COIN) + + self.nodes[0].calculate_fee_from_txid(high_fee_tx), + ) # Add everything back to mempool self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Check to make sure our high fee rate tx is back in the mempool mempool = self.nodes[0].getrawmempool() assert high_fee_tx in mempool # Now verify the modified high-feerate transaction isn't mined before # the other high fee transactions.
Keep mining until our mempool has # decreased by the total size of the high-fee transactions that we calculated above. - while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): + while self.nodes[0].getmempoolinfo()["bytes"] > sizes[0] + sizes[1]: self.generate(self.nodes[0], 1, sync_fun=self.no_op) # High fee transaction should not have been mined, but other high fee rate # transactions should have been. mempool = self.nodes[0].getrawmempool() - self.log.info( - "Assert that de-prioritised transaction is still in mempool") + self.log.info("Assert that de-prioritised transaction is still in mempool") assert high_fee_tx in mempool for x in txids[2]: - if (x != high_fee_tx): + if x != high_fee_tx: assert x not in mempool # Create a free transaction. Should be rejected. utxo_list = self.nodes[0].listunspent() assert len(utxo_list) > 0 utxo = utxo_list[0] inputs = [] outputs = {} inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) outputs[self.nodes[0].getnewaddress()] = utxo["amount"] raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"] tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"] # This will raise an exception due to min relay fee not being met - assert_raises_rpc_error(-26, "min relay fee not met", - self.nodes[0].sendrawtransaction, tx_hex) + assert_raises_rpc_error( + -26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex + ) assert tx_id not in self.nodes[0].getrawmempool() # This is a less than 1000-byte transaction, so just set the fee # to be the minimum for a 1000-byte transaction and check that it is # accepted. self.nodes[0].prioritisetransaction( - txid=tx_id, fee_delta=int(self.relayfee * COIN)) + txid=tx_id, fee_delta=int(self.relayfee * COIN) + ) - self.log.info( - "Assert that prioritised free transaction is accepted to mempool") + self.log.info("Assert that prioritised free transaction is accepted to mempool") assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id) assert tx_id in self.nodes[0].getrawmempool() # Test that calling prioritisetransaction is sufficient to trigger # getblocktemplate to (eventually) return a new block template. mock_time = int(time.time()) self.nodes[0].setmocktime(mock_time) template = self.nodes[0].getblocktemplate() self.nodes[0].prioritisetransaction( - txid=tx_id, fee_delta=-int(self.relayfee * COIN)) + txid=tx_id, fee_delta=-int(self.relayfee * COIN) + ) self.nodes[0].setmocktime(mock_time + 10) new_template = self.nodes[0].getblocktemplate() assert template != new_template -if __name__ == '__main__': +if __name__ == "__main__": PrioritiseTransactionTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 24ce5fb8c..345bcfe99 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -1,936 +1,1062 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
""" import argparse import configparser import datetime import json import logging import multiprocessing import os import re import shutil import subprocess import sys import tempfile import threading import time import unittest import xml.etree.ElementTree as ET from collections import deque from queue import Empty, Queue from typing import Set # Formatting. Default colors to empty strings. BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " -if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393): # type: ignore - if os.name == 'nt': +if os.name != "nt" or sys.getwindowsversion() >= (10, 0, 14393): # type: ignore + if os.name == "nt": import ctypes + kernel32 = ctypes.windll.kernel32 # type: ignore ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 # Enable ascii color control to stdout stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE) stdout_mode = ctypes.c_int32() kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode)) kernel32.SetConsoleMode( - stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) + stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING + ) # Enable ascii color control to stderr stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE) stderr_mode = ctypes.c_int32() kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode)) kernel32.SetConsoleMode( - stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) + stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING + ) # primitive formatting on supported # terminal via ANSI escape sequences: - BOLD = ('\033[0m', '\033[1m') - GREEN = ('\033[0m', '\033[0;32m') - RED = ('\033[0m', '\033[0;31m') - GREY = ('\033[0m', '\033[1;30m') + BOLD = ("\033[0m", "\033[1m") + GREEN = ("\033[0m", "\033[0;32m") + RED = ("\033[0m", "\033[0;31m") + GREY = ("\033[0m", "\033[1;30m") TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 TEST_FRAMEWORK_MODULES = [ "address", "blocktools", "messages", "muhash", "script", "txtools", "util", ] NON_SCRIPTS = { # These are python files that live in the functional tests directory, but # are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", } EXTRA_PRIVILEGES_TESTS = [ # These tests can only run with extra privileges. # They need to be excluded from the timing file because they are not # designed to run in the same context as the other tests. "interface_usdt_net.py", "interface_usdt_utxocache.py", "interface_usdt_validation.py", ] TEST_PARAMS = { # Some test can be run with additional parameters. # When a test is listed here, then it will be run without parameter as well # as with additional parameters listed here. 
# This: # example "testName" : [["--param1", "--param2"] , ["--param3"]] # will run the test 3 times: # testName # testName --param1 --param2 # testName --param3 "rpc_bind.py": [["--ipv4"], ["--ipv6"], ["--nonloopback"]], "rpc_createmultisig.py": [["--descriptors"]], "rpc_deriveaddresses.py": [["--usecli"]], "rpc_fundrawtransaction.py": [["--descriptors"]], "rpc_rawtransaction.py": [["--descriptors"]], "rpc_signrawtransaction.py": [["--descriptors"]], # FIXME: "rpc_psbt.py": [["--descriptors"]], "wallet_address_types.py": [["--descriptors"]], "tool_wallet.py": [["--descriptors"]], "wallet_avoidreuse.py": [["--descriptors"]], "wallet_balance.py": [["--descriptors"]], # FIXME: "wallet_basic.py": [["--descriptors"]], "wallet_createwallet.py": [["--usecli"], ["--descriptors"]], "wallet_encryption.py": [["--descriptors"]], "wallet_hd.py": [["--descriptors"]], "wallet_importprunedfunds.py": [["--descriptors"]], # FIXME: "wallet_keypool.py": [["--descriptors"]], "wallet_keypool_topup.py": [["--descriptors"]], "wallet_labels.py": [["--descriptors"]], "wallet_listsinceblock.py": [["--descriptors"]], "wallet_listtransactions.py": [["--descriptors"]], "wallet_multiwallet.py": [["--usecli"]], "wallet_txn_doublespend.py": [["--mineblock"]], "wallet_txn_clone.py": [["--mineblock"]], "wallet_watchonly.py": [["--usecli"]], } # Used to limit the number of tests, when a list of tests is not provided on the command line # When --extended is specified, we run all tests, otherwise # we only run a test if its execution time in seconds does not exceed # EXTENDED_CUTOFF DEFAULT_EXTENDED_CUTOFF = 40 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1 def bold(text) -> str: return f"{BOLD[1]}{text}{BOLD[0]}" class TestCase: """ Data structure to hold and run information necessary to launch a test case.
""" - def __init__(self, test_num, test_case, tests_dir, - tmpdir, failfast_event, flags=None): + def __init__( + self, test_num, test_case, tests_dir, tmpdir, failfast_event, flags=None + ): self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_case = test_case self.test_num = test_num self.failfast_event = failfast_event self.flags = flags def run(self): if self.failfast_event.is_set(): - return TestResult(self.test_num, self.test_case, - "", "Skipped", 0, "", "") + return TestResult(self.test_num, self.test_case, "", "Skipped", 0, "", "") portseed = self.test_num portseed_arg = [f"--portseed={portseed}"] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = self.test_case.split() - testname = re.sub('.py$', '', test_argv[0]) + testname = re.sub(".py$", "", test_argv[0]) testdir = os.path.join(f"{self.tmpdir}", f"{testname}_{portseed}") tmpdir_arg = [f"--tmpdir={testdir}"] start_time = time.time() - process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, - universal_newlines=True, - stdout=log_stdout, - stderr=log_stderr) + process = subprocess.Popen( + [sys.executable, os.path.join(self.tests_dir, test_argv[0])] + + test_argv[1:] + + self.flags + + portseed_arg + + tmpdir_arg, + universal_newlines=True, + stdout=log_stdout, + stderr=log_stderr, + ) process.wait() log_stdout.seek(0), log_stderr.seek(0) - [stdout, stderr] = [log.read().decode('utf-8') - for log in (log_stdout, log_stderr)] + [stdout, stderr] = [ + log.read().decode("utf-8") for log in (log_stdout, log_stderr) + ] log_stdout.close(), log_stderr.close() if process.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif process.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" - return TestResult(self.test_num, self.test_case, testdir, status, - time.time() - start_time, stdout, stderr) + return TestResult( + self.test_num, + self.test_case, + testdir, + status, + time.time() - start_time, + stdout, + stderr, + ) def on_ci(): - return os.getenv('TRAVIS') == 'true' or os.getenv( - 'TEAMCITY_VERSION') is not None + return os.getenv("TRAVIS") == "true" or os.getenv("TEAMCITY_VERSION") is not None def main(): # Read config generated by configure. 
    config = configparser.ConfigParser()
-    configfile = os.path.join(os.path.abspath(
-        os.path.dirname(__file__)), "..", "config.ini")
+    configfile = os.path.join(
+        os.path.abspath(os.path.dirname(__file__)), "..", "config.ini"
+    )
    config.read_file(open(configfile, encoding="utf8"))
    src_dir = config["environment"]["SRCDIR"]
    build_dir = config["environment"]["BUILDDIR"]
-    tests_dir = os.path.join(src_dir, 'test', 'functional')
+    tests_dir = os.path.join(src_dir, "test", "functional")
    # SRCDIR must be set for cdefs.py to find and parse consensus.h
    os.environ["SRCDIR"] = src_dir

    # Parse arguments and pass through unrecognised args
-    parser = argparse.ArgumentParser(add_help=False,
-                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
-                                     description=__doc__,
-                                     epilog='''
-    Help text and arguments for individual test script:''',
-                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n',
-                        help='On failure, print a log (of length n lines) to '
-                             'the console, combined from the test framework '
-                             'and all test nodes.')
-    parser.add_argument('--coverage', action='store_true',
-                        help='generate a basic coverage report for the RPC interface')
+    parser = argparse.ArgumentParser(
+        add_help=False,
+        usage="%(prog)s [test_runner.py options] [script options] [scripts]",
+        description=__doc__,
+        epilog="""
+    Help text and arguments for individual test script:""",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
    parser.add_argument(
-        '--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
-    parser.add_argument('--extended', action='store_true',
-                        help='run the extended test suite in addition to the basic tests')
-    parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
-                        help='set the cutoff runtime for what tests get run')
-    parser.add_argument('--help', '-h', '-?',
-                        action='store_true', help='print help text and exit')
-    parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
-                        help='how many test scripts to run in parallel.')
-    parser.add_argument('--keepcache', '-k', action='store_true',
-                        help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
-    parser.add_argument('--quiet', '-q', action='store_true',
-                        help='only print results summary and failure logs')
-    parser.add_argument('--tmpdirprefix', '-t',
-                        default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs")
+        "--combinedlogslen",
+        "-c",
+        type=int,
+        default=0,
+        metavar="n",
+        help=(
+            "On failure, print a log (of length n lines) to "
+            "the console, combined from the test framework "
+            "and all test nodes."
+        ),
+    )
+    parser.add_argument(
+        "--coverage",
+        action="store_true",
+        help="generate a basic coverage report for the RPC interface",
+    )
    parser.add_argument(
-        '--failfast',
-        action='store_true',
-        help='stop execution after the first test failure')
-    parser.add_argument('--junitoutput', '-J',
-                        help="File that will store JUnit formatted test results. If no absolute path is given it is treated as relative to the temporary directory.")
-    parser.add_argument('--testsuitename', '-n', default='Bitcoin ABC functional tests',
-                        help="Name of the test suite, as it will appear in the logs and in the JUnit report.")
+        "--exclude", "-x", help="specify a comma-separated list of scripts to exclude."
+    )
+    parser.add_argument(
+        "--extended",
+        action="store_true",
+        help="run the extended test suite in addition to the basic tests",
+    )
+    parser.add_argument(
+        "--cutoff",
+        type=int,
+        default=DEFAULT_EXTENDED_CUTOFF,
+        help="set the cutoff runtime for what tests get run",
+    )
+    parser.add_argument(
+        "--help", "-h", "-?", action="store_true", help="print help text and exit"
+    )
+    parser.add_argument(
+        "--jobs",
+        "-j",
+        type=int,
+        default=DEFAULT_JOBS,
+        help="how many test scripts to run in parallel.",
+    )
+    parser.add_argument(
+        "--keepcache",
+        "-k",
+        action="store_true",
+        help=(
+            "the default behavior is to flush the cache directory on startup."
+            " --keepcache retains the cache from the previous test run."
+        ),
+    )
+    parser.add_argument(
+        "--quiet",
+        "-q",
+        action="store_true",
+        help="only print results summary and failure logs",
+    )
+    parser.add_argument(
+        "--tmpdirprefix",
+        "-t",
+        default=os.path.join(build_dir, "test", "tmp"),
+        help="Root directory for datadirs",
+    )
+    parser.add_argument(
+        "--failfast",
+        action="store_true",
+        help="stop execution after the first test failure",
+    )
+    parser.add_argument(
+        "--junitoutput",
+        "-J",
+        help=(
+            "File that will store JUnit-formatted test results. If no absolute path is"
+            " given it is treated as relative to the temporary directory."
+        ),
+    )
+    parser.add_argument(
+        "--testsuitename",
+        "-n",
+        default="Bitcoin ABC functional tests",
+        help=(
+            "Name of the test suite, as it will appear in the logs and in the JUnit"
+            " report."
+        ),
+    )
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the
    # remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
    passon_args.append(f"--configfile={configfile}")

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
-    logging.basicConfig(format='%(message)s', level=logging_level)
+    logging.basicConfig(format="%(message)s", level=logging_level)
    logging.info(f"Starting {args.testsuitename}")

    # Create base test directory
-    tmpdir = os.path.join(f"{args.tmpdirprefix}",
-                          f"test_runner_₿₵_🏃_{datetime.datetime.now():%Y%m%d_%H%M%S}")
+    tmpdir = os.path.join(
+        f"{args.tmpdirprefix}",
+        f"test_runner_₿₵_🏃_{datetime.datetime.now():%Y%m%d_%H%M%S}",
+    )
    os.makedirs(tmpdir)

    logging.debug(f"Temporary test directory at {tmpdir}")

    if args.junitoutput and not os.path.isabs(args.junitoutput):
        args.junitoutput = os.path.join(tmpdir, args.junitoutput)

    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if not enable_bitcoind:
        print("No functional tests to run.")
        print("Rerun ./configure with --with-daemon and then make")
        sys.exit(0)

    # Build list of tests
    all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)

    # Check all tests with parameters actually exist
    for test in TEST_PARAMS:
        if test not in all_scripts:
-            print(f"ERROR: Test with parameter {test} does not exist, check it has "
-                  "not been renamed or deleted")
+            print(
+                f"ERROR: Test with parameter {test} does not exist, check it has "
+                "not been renamed or deleted"
+            )
            sys.exit(1)

    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the all_scripts list. Accept the name with or without .py
        # extension.
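        # A trailing "*" acts as a wildcard, so e.g. "wallet_*" selects every
        # script whose name starts with "wallet_" (see the wildcard handling
        # below).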
individual_tests = [ - re.sub(r"\.py$", "", test) + ".py" for test in tests if not test.endswith('*')] + re.sub(r"\.py$", "", test) + ".py" + for test in tests + if not test.endswith("*") + ] test_list = [] for test in individual_tests: if test in all_scripts: test_list.append(test) else: print(f"{bold('WARNING!')} Test '{test}' not found in full test list.") # Allow for wildcard at the end of the name, so a single input can # match multiple tests for test in tests: - if test.endswith('*'): - test_list.extend( - [t for t in all_scripts if t.startswith(test[:-1])]) + if test.endswith("*"): + test_list.extend([t for t in all_scripts if t.startswith(test[:-1])]) # do not cut off explicitly specified tests cutoff = sys.maxsize else: # Run base tests only test_list = all_scripts cutoff = sys.maxsize if args.extended else args.cutoff # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: - exclude_tests = [re.sub(r"\.py$", "", test) - + (".py" if ".py" not in test else "") for test in args.exclude.split(',')] + exclude_tests = [ + re.sub(r"\.py$", "", test) + (".py" if ".py" not in test else "") + for test in args.exclude.split(",") + ] for exclude_test in exclude_tests: if exclude_test in test_list: test_list.remove(exclude_test) else: - print(f"{bold('WARNING!')} Test '{exclude_test}' not found in current " - f"test list.") + print( + f"{bold('WARNING!')} Test '{exclude_test}' not found in current " + "test list." + ) # Update timings from build_dir only if separate build directory is used. # We do not want to pollute source directory. build_timings = None - if (src_dir != build_dir): - build_timings = Timings(os.path.join(build_dir, 'timing.json')) + if src_dir != build_dir: + build_timings = Timings(os.path.join(build_dir, "timing.json")) # Always use timings from src_dir if present - src_timings = Timings(os.path.join( - src_dir, "test", "functional", 'timing.json')) + src_timings = Timings(os.path.join(src_dir, "test", "functional", "timing.json")) # Add test parameters and remove long running tests if needed - test_list = get_tests_to_run( - test_list, TEST_PARAMS, cutoff, src_timings) + test_list = get_tests_to_run(test_list, TEST_PARAMS, cutoff, src_timings) if not test_list: - print("No valid test scripts specified. Check that your test is in one " - "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") + print( + "No valid test scripts specified. Check that your test is in one of the" + " test lists in test_runner.py, or run test_runner.py with no arguments to" + " run all tests" + ) sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script # and exit. 
parser.print_help() subprocess.check_call( - [sys.executable, os.path.join(tests_dir, test_list[0]), '-h']) + [sys.executable, os.path.join(tests_dir, test_list[0]), "-h"] + ) sys.exit(0) check_script_prefixes(all_scripts) if not args.keepcache: - shutil.rmtree(os.path.join(build_dir, "test", - "cache"), ignore_errors=True) + shutil.rmtree(os.path.join(build_dir, "test", "cache"), ignore_errors=True) run_tests( test_list, build_dir, tests_dir, args.junitoutput, tmpdir, num_jobs=args.jobs, test_suite_name=args.testsuitename, enable_coverage=args.coverage, args=passon_args, combined_logs_len=args.combinedlogslen, build_timings=build_timings, - failfast=args.failfast + failfast=args.failfast, ) -def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name, - enable_coverage=False, args=None, combined_logs_len=0, build_timings=None, failfast=False): +def run_tests( + test_list, + build_dir, + tests_dir, + junitoutput, + tmpdir, + num_jobs, + test_suite_name, + enable_coverage=False, + args=None, + combined_logs_len=0, + build_timings=None, + failfast=False, +): args = args or [] # Warn if bitcoind is already running try: # pgrep exits with code zero when one or more matching processes found - if subprocess.run(["pgrep", "-x", "bitcoind"], - stdout=subprocess.DEVNULL).returncode == 0: + if ( + subprocess.run( + ["pgrep", "-x", "bitcoind"], stdout=subprocess.DEVNULL + ).returncode + == 0 + ): print( f"{bold('WARNING!')} There is already a bitcoind process running on " - f"this system. Tests may fail unexpectedly due to resource contention!") + "this system. Tests may fail unexpectedly due to resource contention!" + ) except OSError: # pgrep not supported pass # Warn if there is a cache directory cache_dir = os.path.join(build_dir, "test", "cache") if os.path.isdir(cache_dir): - print(f"{bold('WARNING!')} There is a cache directory here: {cache_dir}. " - "If tests fail unexpectedly, try deleting the cache directory.") + print( + f"{bold('WARNING!')} There is a cache directory here: {cache_dir}. " + "If tests fail unexpectedly, try deleting the cache directory." 
+ ) # Test Framework Tests print("Running Unit Tests for Test Framework Modules") test_framework_tests = unittest.TestSuite() for module in TEST_FRAMEWORK_MODULES: test_framework_tests.addTest( - unittest.TestLoader().loadTestsFromName(f"test_framework.{module}")) - result = unittest.TextTestRunner( - verbosity=1, failfast=True).run(test_framework_tests) + unittest.TestLoader().loadTestsFromName(f"test_framework.{module}") + ) + result = unittest.TextTestRunner(verbosity=1, failfast=True).run( + test_framework_tests + ) if not result.wasSuccessful(): - logging.debug( - "Early exiting after failure in TestFramework unit tests") + logging.debug("Early exiting after failure in TestFramework unit tests") sys.exit(False) - flags = [f'--cachedir={cache_dir}'] + args + flags = [f"--cachedir={cache_dir}"] + args if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) - logging.debug( - f"Initializing coverage directory at {coverage.dir}") + logging.debug(f"Initializing coverage directory at {coverage.dir}") else: coverage = None if len(test_list) > 1 and num_jobs > 1: # Populate cache try: subprocess.check_output( - [sys.executable, os.path.join(tests_dir, 'create_cache.py')] + flags + - [os.path.join(f"--tmpdir={tmpdir}", "cache")]) + [sys.executable, os.path.join(tests_dir, "create_cache.py")] + + flags + + [os.path.join(f"--tmpdir={tmpdir}", "cache")] + ) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise # Run Tests start_time = time.time() test_results = execute_test_processes( - num_jobs, test_list, tests_dir, tmpdir, flags, failfast) + num_jobs, test_list, tests_dir, tmpdir, flags, failfast + ) runtime = time.time() - start_time max_len_name = len(max(test_list, key=len)) - print_results(test_results, tests_dir, max_len_name, - runtime, combined_logs_len) + print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len) if junitoutput is not None: - save_results_as_junit( - test_results, - junitoutput, - runtime, - test_suite_name) + save_results_as_junit(test_results, junitoutput, runtime, test_suite_name) - if (build_timings is not None): + if build_timings is not None: build_timings.save_timings(test_results) if coverage: coverage_passed = coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() else: coverage_passed = True # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all(res.was_successful for res in test_results) and coverage_passed sys.exit(not all_passed) def execute_test_processes( - num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False): + num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False +): update_queue = Queue() job_queue = Queue() failfast_event = threading.Event() test_results = [] poll_timeout = 10 # seconds ## # Define some helper functions we will need for threading. 
    ##

    def handle_message(message, running_jobs):
        """
        handle_message handles a single message from handle_test_cases
        """
        if isinstance(message, TestCase):
            running_jobs.append((message.test_num, message.test_case))
            print(f"{bold(message.test_case)} started")
            return

        if isinstance(message, TestResult):
            test_result = message

            running_jobs.remove((test_result.num, test_result.name))
            test_results.append(test_result)

            if test_result.status == "Passed":
-                print(f"{bold(test_result.name)} passed, "
-                      f"Duration: {TimeResolution.seconds(test_result.time)} s")
+                print(
+                    f"{bold(test_result.name)} passed, "
+                    f"Duration: {TimeResolution.seconds(test_result.time)} s"
+                )
            elif test_result.status == "Skipped":
                print(f"{bold(test_result.name)} skipped")
            else:
-                print(f"{bold(test_result.name)} failed, "
-                      f"Duration: {TimeResolution.seconds(test_result.time)} s\n")
-                print(bold('stdout:'))
+                print(
+                    f"{bold(test_result.name)} failed, "
+                    f"Duration: {TimeResolution.seconds(test_result.time)} s\n"
+                )
+                print(bold("stdout:"))
                print(test_result.stdout)
-                print(bold('stderr:'))
+                print(bold("stderr:"))
                print(test_result.stderr)

                if failfast:
                    logging.debug("Early exiting after test failure")
                    failfast_event.set()
            return

        assert False, "we should not be here"

    def handle_update_messages():
        """
        handle_update_messages waits for messages to be sent from
        handle_test_cases via the update_queue. It serializes the results so
        we can print nice status update messages.
        """
        printed_status = False
        running_jobs = []

        while True:
            message = None
            try:
                message = update_queue.get(True, poll_timeout)
                if message is None:
                    break

                # We printed a status message, need to kick to the next line
                # before printing more.
                if printed_status:
                    print()
                    printed_status = False

                handle_message(message, running_jobs)
                update_queue.task_done()
            except Empty:
                if not on_ci():
                    jobs = ", ".join([j[1] for j in running_jobs])
                    print(f"Running jobs: {jobs}", end="\r")
                    sys.stdout.flush()
                    printed_status = True

    def handle_test_cases():
        """
        handle_test_cases represents a single thread that is part of a worker
        pool. It waits for a test, then executes that test. It also reports
        start and result messages to handle_update_messages.
        """
        while True:
            test = job_queue.get()
            if test is None:
                break

            # Signal that the test is starting to inform the poor waiting
            # programmer
            update_queue.put(test)
            result = test.run()
            update_queue.put(result)
            job_queue.task_done()

    ##
    # Set up our threads, and start sending tasks
    ##

    # Start our result collection thread.
    resultCollector = threading.Thread(target=handle_update_messages)
    resultCollector.daemon = True
    resultCollector.start()

    # Start some worker threads
    for _ in range(num_jobs):
        t = threading.Thread(target=handle_test_cases)
        t.daemon = True
        t.start()

    # Push all our test cases into the job queue.
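    # Note: each TestCase pushed here doubles as the "started" notification;
    # a worker echoes it into update_queue before running it, so the result
    # collector can report which jobs are currently in flight.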
    for i, t in enumerate(test_list):
        job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags))

    # Wait for all the jobs to be completed
    job_queue.join()

    # Wait for all the results to be compiled
    update_queue.join()

    # Flush our queues so the threads exit
    update_queue.put(None)
    for _ in range(num_jobs):
        job_queue.put(None)

    return test_results


-def print_results(test_results, tests_dir, max_len_name,
-                  runtime, combined_logs_len):
+def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len):
    results = bold(f"\n{'TEST':<{max_len_name}} | {'STATUS':<9} | DURATION\n\n")

    test_results.sort(key=TestResult.sort_key)
    all_passed = True
    time_sum = 0

    for test_result in test_results:
        all_passed = all_passed and test_result.was_successful
        time_sum += test_result.time
        test_result.padding = max_len_name
        results += str(test_result)

        testdir = test_result.testdir
        if combined_logs_len and os.path.isdir(testdir):
            # Print the final `combinedlogslen` lines of the combined logs
            print(
-                bold(f'Combine the logs and print the last {combined_logs_len} lines ...'))
-            print('\n============')
-            print(bold(f'Combined log for {testdir}:'))
-            print('============\n')
+                bold(
+                    f"Combine the logs and print the last {combined_logs_len} lines ..."
+                )
+            )
+            print("\n============")
+            print(bold(f"Combined log for {testdir}:"))
+            print("============\n")
            combined_logs_args = [
-                sys.executable, os.path.join(
-                    tests_dir, 'combine_logs.py'), testdir]
+                sys.executable,
+                os.path.join(tests_dir, "combine_logs.py"),
+                testdir,
+            ]
            if BOLD[0]:
-                combined_logs_args += ['--color']
+                combined_logs_args += ["--color"]
            combined_logs, _ = subprocess.Popen(
-                combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
-            print(
-                "\n".join(
-                    deque(
-                        combined_logs.splitlines(),
-                        combined_logs_len)))
+                combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE
+            ).communicate()
+            print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

    status = TICK + "Passed" if all_passed else CROSS + "Failed"

    if not all_passed:
        results += RED[1]
    results += bold(
        f"\n{'ALL':<{max_len_name}} | {status:<9} | "
-        f"{TimeResolution.seconds(time_sum)} s (accumulated) \n")
+        f"{TimeResolution.seconds(time_sum)} s (accumulated) \n"
+    )
    if not all_passed:
        results += RED[0]
    results += f"Runtime: {TimeResolution.seconds(runtime)} s\n"
    print(results)


class TestResult:
    """
    Simple data structure to store test result values and print them properly
    """

    def __init__(self, num, name, testdir, status, time, stdout, stderr):
        self.num = num
        self.name = name
        self.testdir = testdir
        self.status = status
        self.time = time
        self.padding = 0
        self.stdout = stdout
        self.stderr = stderr

    def sort_key(self):
        if self.status == "Passed":
            return 0, self.name.lower()
        elif self.status == "Failed":
            return 2, self.name.lower()
        elif self.status == "Skipped":
            return 1, self.name.lower()

    def __repr__(self):
        if self.status == "Passed":
            color = GREEN
            glyph = TICK
        elif self.status == "Failed":
            color = RED
            glyph = CROSS
        elif self.status == "Skipped":
            color = GREY
            glyph = CIRCLE

        return (
            f"{color[1]}{self.name:<{self.padding}} | {glyph}{self.status:<7} | "
            f"{TimeResolution.seconds(self.time)} s\n{color[0]}"
        )

    @property
    def was_successful(self):
        return self.status != "Failed"


def get_all_scripts_from_disk(test_dir, non_scripts: Set[str]) -> Set[str]:
    """
    Return all available test scripts from the script directory (excluding
    NON_SCRIPTS)
    """
    python_files = {t for t in os.listdir(test_dir) if t[-3:] == ".py"}
    return python_files - non_scripts
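

# For illustration (hypothetical names): "feature_block.py" or
# "abc_p2p_foo.py" satisfy the prefix convention enforced below, while a
# name like "mytest.py" would count as a violation.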
def check_script_prefixes(all_scripts):
    """Check that no more than `EXPECTED_VIOLATION_COUNT` of the
-       test scripts don't start with one of the allowed name prefixes."""
+    test scripts don't start with one of the allowed name prefixes."""
    EXPECTED_VIOLATION_COUNT = 14

    # LEEWAY is provided as a transition measure, so that pull-requests
    # that introduce new tests that don't conform with the naming
    # convention don't immediately cause the tests to fail.
    LEEWAY = 0

    good_prefixes_re = re.compile(
-        "(abc_)?(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool|chronik)_")
+        "(abc_)?(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool|chronik)_"
+    )
    bad_script_names = [
-        script for script in all_scripts if good_prefixes_re.match(script) is None]
+        script for script in all_scripts if good_prefixes_re.match(script) is None
+    ]
    if len(bad_script_names) < EXPECTED_VIOLATION_COUNT:
-        print(f"{bold('HURRAY!')} Number of functional tests violating naming "
-              "convention reduced!")
-        print("Consider reducing EXPECTED_VIOLATION_COUNT from "
-              f"{EXPECTED_VIOLATION_COUNT} to {len(bad_script_names)}")
+        print(
+            f"{bold('HURRAY!')} Number of functional tests violating naming "
+            "convention reduced!"
+        )
+        print(
+            "Consider reducing EXPECTED_VIOLATION_COUNT from "
+            f"{EXPECTED_VIOLATION_COUNT} to {len(bad_script_names)}"
+        )
    elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT:
-        print(f"INFO: {len(bad_script_names)} tests not meeting naming conventions "
-              f"(expected {EXPECTED_VIOLATION_COUNT}):")
-        formatted_bad_script_names = '\n  '.join(sorted(bad_script_names))
+        print(
+            f"INFO: {len(bad_script_names)} tests not meeting naming conventions "
+            f"(expected {EXPECTED_VIOLATION_COUNT}):"
+        )
+        formatted_bad_script_names = "\n  ".join(sorted(bad_script_names))
        print(f"  {formatted_bad_script_names}")

-    assert \
-        len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + LEEWAY, \
-        f"Too many tests not following naming convention! ({len(bad_script_names)}" \
-        f" found, expected: <= {EXPECTED_VIOLATION_COUNT})"
+    assert len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + LEEWAY, (
+        f"Too many tests not following naming convention! ({len(bad_script_names)}"
+        f" found, expected: <= {EXPECTED_VIOLATION_COUNT})"
+    )


def get_tests_to_run(test_list, test_params, cutoff, src_timings):
    """
    Returns only tests that will not run longer than the cutoff.
    Long-running tests are returned first to favor running tests in parallel.
    Timings from the build directory override those from the src directory.
    """

    def get_test_time(test):
        # Return 0 if test is unknown to always run it
        return next(
-            (x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
+            (x["time"] for x in src_timings.existing_timings if x["name"] == test), 0
+        )

    # Some tests must also be run with additional parameters. Add them to the
    # list.
    tests_with_params = []
    for test_name in test_list:
        # always execute a test without parameters
        tests_with_params.append(test_name)
        params = test_params.get(test_name)
        if params is not None:
            tests_with_params.extend(
-                [test_name + " " + " ".join(parameter) for parameter in params])
+                [test_name + " " + " ".join(parameter) for parameter in params]
+            )

-    result = [
-        test for test in tests_with_params if get_test_time(test) <= cutoff]
+    result = [test for test in tests_with_params if get_test_time(test) <= cutoff]
    result.sort(key=lambda x: (-get_test_time(x), x))
    return result
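

# A rough worked example of get_tests_to_run, with made-up names and timings:
# given test_list = ["wallet_hd.py"], test_params = {"wallet_hd.py":
# [["--descriptors"]]}, a recorded time of 50 s for the bare test, no recorded
# time for the parameterized variant (treated as 0 s), and cutoff = 40, the
# expansion yields ["wallet_hd.py", "wallet_hd.py --descriptors"], and only
# "wallet_hd.py --descriptors" survives the cutoff filter.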
class RPCCoverage:
    """
    Coverage reporting utilities for test_runner.

    Coverage calculation works by having each test script subprocess write
    coverage files into a particular directory. These files contain the RPC
    commands invoked during testing, as well as a complete listing of RPC
    commands per `bitcoin-cli help` (`rpc_interface.txt`).

    After all tests complete, the commands run are combined and diff'd against
    the complete list to calculate uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py
    """

    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = f"--coveragedir={self.dir}"

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()

        if uncovered:
            print("Uncovered RPC commands:")
            print("".join(f" - {i}\n" for i in sorted(uncovered)))
            return False
        else:
            print("All RPC commands covered.")
            return True

    def cleanup(self):
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test_framework/coverage.py`
-        reference_filename = 'rpc_interface.txt'
-        coverage_file_prefix = 'coverage.'
+        reference_filename = "rpc_interface.txt"
+        coverage_file_prefix = "coverage."

        coverage_ref_filename = os.path.join(self.dir, reference_filename)
        coverage_filenames = set()
        all_cmds = set()
        # Consider RPC generate covered, because it is overloaded in
        # test_framework/test_node.py and not seen by the coverage check.
-        covered_cmds = set({'generate'})
+        covered_cmds = set({"generate"})

        if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")

-        with open(coverage_ref_filename, 'r', encoding="utf8") as file:
+        with open(coverage_ref_filename, "r", encoding="utf8") as file:
            all_cmds.update([line.strip() for line in file.readlines()])

        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    coverage_filenames.add(os.path.join(root, filename))

        for filename in coverage_filenames:
-            with open(filename, 'r', encoding="utf8") as file:
-                covered_cmds.update([line.strip()
-                                     for line in file.readlines()])
+            with open(filename, "r", encoding="utf8") as file:
+                covered_cmds.update([line.strip() for line in file.readlines()])

        return all_cmds - covered_cmds


def save_results_as_junit(test_results, file_name, time, test_suite_name):
    """
    Save test results to file in JUnit format

    See http://llg.cubic.org/docs/junit/ for specification of format
    """
-    e_test_suite = ET.Element("testsuite",
-                              {"name": f"{test_suite_name}",
-                               "tests": str(len(test_results)),
-                               # "errors":
-                               "failures": str(len([t for t in test_results if t.status == "Failed"])),
-                               "id": "0",
-                               "skipped": str(len([t for t in test_results if t.status == "Skipped"])),
-                               "time": str(TimeResolution.milliseconds(time)),
-                               "timestamp": datetime.datetime.now().isoformat('T')
-                               })
+    e_test_suite = ET.Element(
+        "testsuite",
+        {
+            "name": f"{test_suite_name}",
+            "tests": str(len(test_results)),
+            # "errors":
+            "failures": str(len([t for t in test_results if t.status == "Failed"])),
+            "id": "0",
+            "skipped": str(len([t for t in test_results if t.status == "Skipped"])),
+            "time": str(TimeResolution.milliseconds(time)),
+            "timestamp": datetime.datetime.now().isoformat("T"),
+        },
+    )

    for test_result in test_results:
-        e_test_case = ET.SubElement(e_test_suite, "testcase",
-                                    {"name": test_result.name,
-                                     "classname": test_result.name,
-                                     "time": str(TimeResolution.milliseconds(test_result.time))
-                                     }
-                                    )
+        e_test_case = ET.SubElement(
+            e_test_suite,
+            "testcase",
+            {
+                "name": test_result.name,
+                "classname": test_result.name,
+                "time": str(TimeResolution.milliseconds(test_result.time)),
+            },
+        )
        if test_result.status == "Skipped":
            ET.SubElement(e_test_case, "skipped")
        elif test_result.status == "Failed":
            ET.SubElement(e_test_case, "failure")
        # no special element for passed tests

        ET.SubElement(e_test_case, "system-out").text = test_result.stdout
        ET.SubElement(e_test_case, "system-err").text = test_result.stderr

-    ET.ElementTree(e_test_suite).write(
-        file_name, "UTF-8", xml_declaration=True)
+    ET.ElementTree(e_test_suite).write(file_name, "UTF-8", xml_declaration=True)
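

# The emitted document has roughly this shape (values illustrative):
#
#   <?xml version='1.0' encoding='UTF-8'?>
#   <testsuite name="Bitcoin ABC functional tests" tests="2" failures="1"
#              id="0" skipped="0" time="12.345" timestamp="...">
#       <testcase name="wallet_hd.py" classname="wallet_hd.py" time="10.001">
#           <system-out>...</system-out>
#           <system-err>...</system-err>
#       </testcase>
#       ...
#   </testsuite>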
class Timings:
    """
    Takes care of loading, merging and saving test execution times.
    """

    def __init__(self, timing_file):
        self.timing_file = timing_file
        self.existing_timings = self.load_timings()

    def load_timings(self):
        if os.path.isfile(self.timing_file):
            with open(self.timing_file, encoding="utf8") as file:
                return json.load(file)
        else:
            return []

    def get_merged_timings(self, new_timings):
        """
        Return a new list containing existing timings updated with new timings.
        Tests that do not exist are not removed.
        """
-        key = 'name'
+        key = "name"
        merged = {}
        for item in self.existing_timings + new_timings:
            if item[key] in merged:
                merged[item[key]].update(item)
            else:
                merged[item[key]] = item

        # Sort the result to preserve test ordering in file
        merged = list(merged.values())
        merged.sort(key=lambda t, key=key: t[key])
        return merged

    def save_timings(self, test_results):
        # we only save tests that have passed - timings for failed tests might
        # be wrong (timeouts or early fails), and we exclude the tests that
        # require extra privileges.
        passed_results = [
-            test for test in test_results if test.status == 'Passed' and test.name not in EXTRA_PRIVILEGES_TESTS]
-        new_timings = [{'name': test.name, 'time': TimeResolution.seconds(test.time)}
-                       for test in passed_results]
+            test
+            for test in test_results
+            if test.status == "Passed" and test.name not in EXTRA_PRIVILEGES_TESTS
+        ]
+        new_timings = [
+            {"name": test.name, "time": TimeResolution.seconds(test.time)}
+            for test in passed_results
+        ]
        merged_timings = self.get_merged_timings(new_timings)

-        with open(self.timing_file, 'w', encoding="utf8") as file:
+        with open(self.timing_file, "w", encoding="utf8") as file:
            json.dump(merged_timings, file, indent=True)


class TimeResolution:
    @staticmethod
    def seconds(time_fractional_second):
        return round(time_fractional_second)

    @staticmethod
    def milliseconds(time_fractional_second):
        return round(time_fractional_second, 3)


-if __name__ == '__main__':
+if __name__ == "__main__":
    main()

diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py
index 56b633f24..9c1662cfa 100755
--- a/test/functional/tool_wallet.py
+++ b/test/functional/tool_wallet.py
@@ -1,275 +1,291 @@
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet.""" import hashlib import os import stat import subprocess import textwrap from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal BUFFER_SIZE = 16 * 1024 class ToolWalletTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() self.skip_if_no_wallet_tool() def bitcoin_wallet_process(self, *args): - binary = self.config["environment"]["BUILDDIR"] + \ - '/src/bitcoin-wallet' + self.config["environment"]["EXEEXT"] - args = [f'-datadir={self.nodes[0].datadir}', - f'-chain={self.chain}'] + list(args) + binary = ( + self.config["environment"]["BUILDDIR"] + + "/src/bitcoin-wallet" + + self.config["environment"]["EXEEXT"] + ) + args = [f"-datadir={self.nodes[0].datadir}", f"-chain={self.chain}"] + list( + args + ) command_line = [binary] + args if self.config["environment"]["EMULATOR"]: - command_line = [ - self.config["environment"]["EMULATOR"]] + command_line + command_line = [self.config["environment"]["EMULATOR"]] + command_line - return subprocess.Popen(command_line, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + return subprocess.Popen( + command_line, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) def assert_raises_tool_error(self, error, *args): p = self.bitcoin_wallet_process(*args) stdout, stderr = p.communicate() assert_equal(p.poll(), 1) - assert_equal(stdout, '') + assert_equal(stdout, "") assert_equal(stderr.strip(), error) def assert_tool_output(self, output, *args): p = self.bitcoin_wallet_process(*args) stdout, stderr = p.communicate() - assert_equal(stderr, '') + assert_equal(stderr, "") assert_equal(stdout, output) assert_equal(p.poll(), 0) def wallet_shasum(self): h = hashlib.sha1() mv = memoryview(bytearray(BUFFER_SIZE)) - with open(self.wallet_path, 'rb', buffering=0) as f: + with open(self.wallet_path, "rb", buffering=0) as f: for n in iter(lambda: f.readinto(mv), 0): h.update(mv[:n]) return h.hexdigest() def wallet_timestamp(self): return os.path.getmtime(self.wallet_path) def wallet_permissions(self): return oct(os.lstat(self.wallet_path).st_mode)[-3:] def log_wallet_timestamp_comparison(self, old, new): - result = 'unchanged' if new == old else 'increased!' - self.log.debug(f'Wallet file timestamp {result}') + result = "unchanged" if new == old else "increased!" + self.log.debug(f"Wallet file timestamp {result}") def test_invalid_tool_commands_and_args(self): self.log.info( - 'Testing that various invalid commands raise with specific error messages') - self.assert_raises_tool_error('Invalid command: foo', 'foo') + "Testing that various invalid commands raise with specific error messages" + ) + self.assert_raises_tool_error("Invalid command: foo", "foo") # `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`. - self.assert_raises_tool_error('Invalid command: help', 'help') + self.assert_raises_tool_error("Invalid command: help", "help") self.assert_raises_tool_error( - 'Error: two methods provided (info and create). Only one method should be provided.', - 'info', - 'create') + ( + "Error: two methods provided (info and create). Only one method should" + " be provided." 
+ ), + "info", + "create", + ) self.assert_raises_tool_error( - 'Error parsing command line arguments: Invalid parameter -foo', '-foo') - locked_dir = os.path.join(self.options.tmpdir, "node0", "regtest", - "wallets") + "Error parsing command line arguments: Invalid parameter -foo", "-foo" + ) + locked_dir = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets") self.assert_raises_tool_error( f'Error initializing wallet database environment "{locked_dir}"!', f"-wallet={self.default_wallet_name}", - 'info', + "info", + ) + path = os.path.join( + self.options.tmpdir, "node0", "regtest", "wallets", "nonexistent.dat" ) - path = os.path.join(self.options.tmpdir, "node0", "regtest", - "wallets", "nonexistent.dat") self.assert_raises_tool_error( f"Failed to load database path '{path}'. Path does not exist.", - '-wallet=nonexistent.dat', 'info') + "-wallet=nonexistent.dat", + "info", + ) def test_tool_wallet_info(self): # Stop the node to close the wallet to call the info command. self.stop_node(0) - self.log.info('Calling wallet tool info, testing output') + self.log.info("Calling wallet tool info, testing output") # # TODO: Wallet tool info should work with wallet file permissions set to # read-only without raising: # "Error loading wallet.dat. Is wallet being used by another process?" # The following lines should be uncommented and the tests still succeed: # # self.log.debug('Setting wallet file permissions to 400 (read-only)') # os.chmod(self.wallet_path, stat.S_IRUSR) # assert self.wallet_permissions() in ['400', '666'] # Sanity check. 666 because Appveyor. # shasum_before = self.wallet_shasum() timestamp_before = self.wallet_timestamp() - self.log.debug( - f'Wallet file timestamp before calling info: {timestamp_before}') - out = textwrap.dedent('''\ + self.log.debug(f"Wallet file timestamp before calling info: {timestamp_before}") + out = textwrap.dedent( + """\ Wallet info =========== Encrypted: no HD (hd seed available): yes Keypool Size: 2 Transactions: 0 Address Book: 1 - ''') - self.assert_tool_output( - out, - f"-wallet={self.default_wallet_name}", - 'info') + """ + ) + self.assert_tool_output(out, f"-wallet={self.default_wallet_name}", "info") timestamp_after = self.wallet_timestamp() - self.log.debug( - f'Wallet file timestamp after calling info: {timestamp_after}') + self.log.debug(f"Wallet file timestamp after calling info: {timestamp_after}") self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after) - self.log.debug( - 'Setting wallet file permissions back to 600 (read/write)') + self.log.debug("Setting wallet file permissions back to 600 (read/write)") os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR) # Sanity check. 666 because Appveyor. - assert self.wallet_permissions() in ['600', '666'] + assert self.wallet_permissions() in ["600", "666"] # # TODO: Wallet tool info should not write to the wallet file. # The following lines should be uncommented and the tests still succeed: # # assert_equal(timestamp_before, timestamp_after) # shasum_after = self.wallet_shasum() # assert_equal(shasum_before, shasum_after) # self.log.debug('Wallet file shasum unchanged\n') def test_tool_wallet_info_after_transaction(self): """ Mutate the wallet with a transaction to verify that the info command output changes accordingly. 
""" self.start_node(0) - self.log.info('Generating transaction to mutate wallet') + self.log.info("Generating transaction to mutate wallet") self.generate(self.nodes[0], 1) self.stop_node(0) self.log.info( - 'Calling wallet tool info after generating a transaction, testing output') + "Calling wallet tool info after generating a transaction, testing output" + ) shasum_before = self.wallet_shasum() timestamp_before = self.wallet_timestamp() - self.log.debug( - f'Wallet file timestamp before calling info: {timestamp_before}') - out = textwrap.dedent('''\ + self.log.debug(f"Wallet file timestamp before calling info: {timestamp_before}") + out = textwrap.dedent( + """\ Wallet info =========== Encrypted: no HD (hd seed available): yes Keypool Size: 2 Transactions: 1 Address Book: 1 - ''') - self.assert_tool_output( - out, - f"-wallet={self.default_wallet_name}", - 'info') + """ + ) + self.assert_tool_output(out, f"-wallet={self.default_wallet_name}", "info") shasum_after = self.wallet_shasum() timestamp_after = self.wallet_timestamp() - self.log.debug( - f'Wallet file timestamp after calling info: {timestamp_after}') + self.log.debug(f"Wallet file timestamp after calling info: {timestamp_after}") self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after) # # TODO: Wallet tool info should not write to the wallet file. # This assertion should be uncommented and succeed: # assert_equal(timestamp_before, timestamp_after) assert_equal(shasum_before, shasum_after) - self.log.debug('Wallet file shasum unchanged\n') + self.log.debug("Wallet file shasum unchanged\n") def test_tool_wallet_create_on_existing_wallet(self): self.log.info( - 'Calling wallet tool create on an existing wallet, testing output') + "Calling wallet tool create on an existing wallet, testing output" + ) shasum_before = self.wallet_shasum() timestamp_before = self.wallet_timestamp() self.log.debug( - f'Wallet file timestamp before calling create: {timestamp_before}') - out = textwrap.dedent('''\ + f"Wallet file timestamp before calling create: {timestamp_before}" + ) + out = textwrap.dedent( + """\ Topping up keypool... 
Wallet info =========== Encrypted: no HD (hd seed available): yes Keypool Size: 2000 Transactions: 0 Address Book: 0 - ''') - self.assert_tool_output(out, '-wallet=foo', 'create') + """ + ) + self.assert_tool_output(out, "-wallet=foo", "create") shasum_after = self.wallet_shasum() timestamp_after = self.wallet_timestamp() - self.log.debug( - f'Wallet file timestamp after calling create: {timestamp_after}') + self.log.debug(f"Wallet file timestamp after calling create: {timestamp_after}") self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after) assert_equal(timestamp_before, timestamp_after) assert_equal(shasum_before, shasum_after) - self.log.debug('Wallet file shasum unchanged\n') + self.log.debug("Wallet file shasum unchanged\n") def test_getwalletinfo_on_different_wallet(self): - self.log.info('Starting node with arg -wallet=foo') - self.start_node(0, ['-nowallet', '-wallet=foo']) + self.log.info("Starting node with arg -wallet=foo") + self.start_node(0, ["-nowallet", "-wallet=foo"]) self.log.info( - 'Calling getwalletinfo on a different wallet ("foo"), testing output') + 'Calling getwalletinfo on a different wallet ("foo"), testing output' + ) shasum_before = self.wallet_shasum() timestamp_before = self.wallet_timestamp() self.log.debug( - f'Wallet file timestamp before calling getwalletinfo: {timestamp_before}') + f"Wallet file timestamp before calling getwalletinfo: {timestamp_before}" + ) out = self.nodes[0].getwalletinfo() self.stop_node(0) shasum_after = self.wallet_shasum() timestamp_after = self.wallet_timestamp() self.log.debug( - f'Wallet file timestamp after calling getwalletinfo: {timestamp_after}') + f"Wallet file timestamp after calling getwalletinfo: {timestamp_after}" + ) - assert_equal(0, out['txcount']) - assert_equal(1000, out['keypoolsize']) - assert_equal(1000, out['keypoolsize_hd_internal']) - assert_equal(True, 'hdseedid' in out) + assert_equal(0, out["txcount"]) + assert_equal(1000, out["keypoolsize"]) + assert_equal(1000, out["keypoolsize_hd_internal"]) + assert_equal(True, "hdseedid" in out) self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after) assert_equal(timestamp_before, timestamp_after) assert_equal(shasum_after, shasum_before) - self.log.debug('Wallet file shasum unchanged\n') + self.log.debug("Wallet file shasum unchanged\n") def test_salvage(self): # TODO: Check salvage actually salvages and doesn't break things. # https://github.com/bitcoin/bitcoin/issues/7463 - self.log.info('Check salvage') - self.start_node(0, ['-wallet=salvage']) + self.log.info("Check salvage") + self.start_node(0, ["-wallet=salvage"]) self.stop_node(0) - self.assert_tool_output('', '-wallet=salvage', 'salvage') + self.assert_tool_output("", "-wallet=salvage", "salvage") def run_test(self): self.wallet_path = os.path.join( self.nodes[0].datadir, self.chain, - 'wallets', + "wallets", self.default_wallet_name, - self.wallet_data_filename + self.wallet_data_filename, ) self.test_invalid_tool_commands_and_args() # Warning: The following tests are order-dependent. self.test_tool_wallet_info() self.test_tool_wallet_info_after_transaction() if not self.options.descriptors: # TODO: Wallet tool needs more create options at which point these # can be enabled. self.test_tool_wallet_create_on_existing_wallet() self.test_getwalletinfo_on_different_wallet() # Salvage is a legacy wallet only thing self.test_salvage() -if __name__ == '__main__': +if __name__ == "__main__": ToolWalletTest().main()